Compare commits

99 Commits

| SHA1 |
|---|
| e1dafe76dd |
| 1a2e63ecff |
| a60b32cb80 |
| 6b58220009 |
| a841057679 |
| ee6abea956 |
| 8b0acd75e0 |
| cea7d44717 |
| 0da87e75fe |
| 566961c7a5 |
| ac65258d30 |
| 35261e6dba |
| 2818f23ba5 |
| 88f071ea22 |
| bd4bc0e7f1 |
| 890c1d53ff |
| 026260e7a1 |
| 99fe93b7f1 |
| b764c53020 |
| 11bd7128d2 |
| 7cda32664b |
| 4c73a0ae56 |
| 97fc845a6a |
| 7d9ac0163b |
| d903e064e0 |
| e1928288fe |
| 6ab6412dd3 |
| 30b7a69d3d |
| ccd0a446d8 |
| 0418e53b3c |
| bad48ab59a |
| bbad76bb71 |
| 6c1bd98c14 |
| b95e4acaeb |
| c238701d09 |
| 60d2ac3c7a |
| 967ef4d56b |
| ad57bdda6c |
| a0b69d1d3d |
| 5df94d7e33 |
| 1cbe9fb7a3 |
| 395f736753 |
| 065516c5f3 |
| 8660abaea2 |
| 366f0705a0 |
| ccea87ca68 |
| 5d54883e2f |
| 662f65c3c8 |
| 259f028490 |
| 5db550a298 |
| e3c77d2906 |
| ba00e79253 |
| c1791f920e |
| df3803c7b7 |
| 384b4cbafa |
| 40501a7a73 |
| ab89b4cd4a |
| 48e0d55c8e |
| 1eba27a50a |
| 41206fa0e2 |
| 21cf1d64e5 |
| ae91b6f673 |
| f4515b5cfa |
| 6c57cde7f9 |
| 5014c1827b |
| f531e6aff2 |
| c5c110137b |
| 5957d9ead0 |
| 5675df2a44 |
| 00bc9142c4 |
| 5653ab39fc |
| 473dd7c940 |
| ee824d52ba |
| 7c5fba9890 |
| f214cb03b2 |
| 416c51799b |
| cf6f665f03 |
| 20da0e4dd3 |
| fa8c417526 |
| 2c65aec6c8 |
| 96d8e6d823 |
| 62b8084300 |
| 907aff5de4 |
| bc09129ec5 |
| cd94f638e2 |
| b1fe419870 |
| 98b724391f |
| 620f6ec616 |
| 0c6a3882a2 |
| a08880ae15 |
| f48826dfe9 |
| 9c3551478e |
| cc94e1da26 |
| 2b7ea5813c |
| 185192be67 |
| ae4caa96a0 |
| af13ae82c1 |
| 13503c063b |
| 337da59368 |
.github/ISSUE_TEMPLATE.md (26 changes, vendored)

@@ -1,27 +1,27 @@
-#### Additional Information
+### Additional Information
 _The following information is very important in order to help us to help you. Omission of the following details may delay your support request or receive no attention at all._
 
-- Version of s3fs being used (s3fs --version)
-- _example: 1.0_
+#### Version of s3fs being used (s3fs --version)
+_example: 1.00_
 
-- Version of fuse being used (pkg-config --modversion fuse)
-- _example: 2.9.4_
+#### Version of fuse being used (pkg-config --modversion fuse)
+_example: 2.9.4_
 
-- System information (uname -a)
-- _command result: uname -a_
+#### System information (uname -r)
+_command result: uname -r_
 
-- Distro (cat /etc/issue)
-- _command result: result_
+#### Distro (cat /etc/issue)
+_command result: cat /etc/issue_
 
-- s3fs command line used (if applicable)
+#### s3fs command line used (if applicable)
 ```
 ```
 
-- /etc/fstab entry (if applicable):
+#### /etc/fstab entry (if applicable):
 ```
 ```
 
-- s3fs syslog messages (grep s3fs /var/log/syslog, or s3fs outputs)
+#### s3fs syslog messages (grep s3fs /var/log/syslog, or s3fs outputs)
 _if you execute s3fs with dbglevel, curldbg option, you can get detail debug messages_
 ```
 ```
 
-#### Details about issue
+### Details about issue
.github/PULL_REQUEST_TEMPLATE.md (4 changes, vendored)

@@ -1,5 +1,5 @@
-#### Relevant Issue (if applicable)
+### Relevant Issue (if applicable)
 _If there are Issues related to this PullRequest, please list it._
 
-#### Details
+### Details
 _Please describe the details of PullRequest._
.gitignore (1 change, vendored)

@@ -27,5 +27,6 @@
 /test/.deps/
 /test/Makefile
 /test/Makefile.in
+/test/s3proxy-*
 /test/*.log
 /default_commit_hash
.travis.yml (58 changes)

@@ -1,17 +1,43 @@
 language: cpp
-sudo: required
-dist: trusty
-cache: apt
-before_install:
-  - sudo apt-get update -qq
-  - sudo apt-get install -qq cppcheck libfuse-dev openjdk-7-jdk
-  - sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
-script:
-  - ./autogen.sh
-  - ./configure
-  - make
-  - make cppcheck
-  - make check -C src
-  - modprobe fuse
-  - make check -C test
-  - cat test/test-suite.log
+
+matrix:
+  include:
+    - os: linux
+      sudo: required
+      dist: trusty
+      cache: apt
+      before_install:
+        - sudo apt-get update -qq
+        - sudo apt-get install -qq cppcheck libfuse-dev openjdk-7-jdk
+        - sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
+      script:
+        - ./autogen.sh
+        - ./configure CPPFLAGS='-I/usr/local/opt/openssl/include'
+        - make
+        - make cppcheck
+        - make check -C src
+        - modprobe fuse
+        - make check -C test
+        - cat test/test-suite.log
+
+    - os: osx
+      osx_image: xcode8.3
+      before_install:
+        - brew update
+        - brew install truncate
+        - brew tap caskroom/cask
+        - brew cask install osxfuse
+        - if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then sudo chmod +s /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then sudo chmod +s /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ; fi
+        - brew install gnu-sed
+        - sudo ln -s /usr/local/opt/gnu-sed/bin/gsed /usr/local/bin/sed
+        - sudo ln -s /usr/local/opt/coreutils/bin/gstdbuf /usr/local/bin/stdbuf
+        - brew install cppcheck
+      script:
+        - ./autogen.sh
+        - PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig ./configure
+        - make
+        - make cppcheck
+        - make check -C src
+        - if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ; fi
+        - make check -C test
+        - cat test/test-suite.log
ChangeLog (45 changes)

@@ -1,6 +1,50 @@
 ChangeLog for S3FS
 ------------------
 
+Version 1.83 -- Dec 17, 2017
+#606 - Add Homebrew instructions
+#608 - Fix chown_nocopy losing existing uid/gid if unspecified
+#609 - Group permission checks sometimes fail with large number of groups
+#611 - Fixed clock_gettime build failure on macOS 10.12 Sierra - #600
+#621 - Upgrade to S3Proxy 1.5.3
+#627 - Update README.md
+#630 - Added travis test on osx for #601
+#631 - Merged macosx branch into master branch #601
+#636 - Fix intermittent upload failures on macOS
+#637 - Add blurb about non-Amazon S3 implementations
+#638 - Minor fixes to README
+#639 - Update Homebrew instructions
+#642 - Fixed potential atomic violation in S3fsCurl::AddUserAgent - #633
+#644 - Fixed with unnecessary equal in POST uploads url argment - #643
+#645 - Configure S3Proxy for SSL
+#646 - Simplify S3Proxy PID handling
+#652 - Fix s3fs_init message
+#659 - Do not fail updating directory when removing old-style object(ref #658)
+#660 - Refixed s3fs_init message(ref #652)
+#663 - Lock FdEntity when mutating orgmeta
+#664 - auth headers insertion refactoring
+#668 - Changed .travis.yml for fixing not found gpg2 on osx
+#669 - add IBM IAM authentication support
+#670 - Fixed a bug in S3fsCurl::LocateBundle
+#671 - Add support for ECS metadata endpoint
+#675 - Reduce use of preprocessor
+#676 - Move str definition from header to implementation
+#677 - Add s3proxy to .gitignore
+#679 - README.md Addition
+#681 - Changed functions about reading passwd file
+#684 - Correct signedness warning
+#686 - remove use of jsoncpp
+#688 - Improved use of temporary files - #678
+#690 - Added option ecs description to man page
+#692 - Updated template md files for issue and pr
+#695 - fix condition for parallel download
+#697 - Fixing race condition in FdEntity::GetStats
+#699 - Fix dbglevel usage
+
 Version 1.82 -- May 13, 2017
 #597 - Not fallback to HTTP - #596
 #598 - Updated ChangeLog and configure.ac for release 1.82
 
 Version 1.81 -- May 13, 2017
 #426 - Updated to correct ChangeLog
 #431 - fix typo s/controll/control/

@@ -60,6 +104,7 @@ Version 1.81 -- May 13, 2017
 #590 - Updated man page for default_acl option - #567
 #593 - Backward compatible for changing default transport to HTTPS
 #594 - Check bucket at public bucket and add nocopyapi option automatically
+#595 - Updated ChangeLog and configure.ac for release 1.81
 
 Version 1.80 -- May 29, 2016
 #213 - Parse ETag from copy multipart correctly
Makefile.am

@@ -34,6 +34,7 @@ cppcheck:
 	--std=c++03 \
 	-U CURLE_PEER_FAILED_VERIFICATION \
 	-U P_tmpdir \
+	-U ENOATTR \
 	--enable=all \
 	--suppress=missingIncludeSystem \
 	--suppress=unusedFunction \
README.md (43 changes)

@@ -22,12 +22,12 @@ Features
 Installation
 ------------
 
-Ensure you have all the dependencies:
+* On Linux, ensure you have all the dependencies:
 
 On Ubuntu 14.04:
 
 ```
-sudo apt-get install automake autotools-dev g++ git libcurl4-gnutls-dev libfuse-dev libssl-dev libxml2-dev make pkg-config
+sudo apt-get install automake autotools-dev fuse g++ git libcurl4-gnutls-dev libfuse-dev libssl-dev libxml2-dev make pkg-config
 ```
 
 On CentOS 7:

@@ -36,7 +36,7 @@ On CentOS 7:
 sudo yum install automake fuse fuse-devel gcc-c++ git libcurl-devel libxml2-devel make openssl-devel
 ```
 
-Compile from master via the following commands:
+Then compile from master via the following commands:
 
 ```
 git clone https://github.com/s3fs-fuse/s3fs-fuse.git

@@ -47,18 +47,21 @@ make
 sudo make install
 ```
 
+* On Mac OS X, install via [Homebrew](http://brew.sh/):
+
+```ShellSession
+$ brew cask install osxfuse
+$ brew install s3fs
+```
+
 Examples
 --------
 
-Enter your S3 identity and credential in a file `/path/to/passwd`:
+Enter your S3 identity and credential in a file `/path/to/passwd` and set
+owner-only permissions:
 
 ```
 echo MYIDENTITY:MYCREDENTIAL > /path/to/passwd
 ```
 
-Make sure the file has proper permissions (if you get 'permissions' error when mounting) `/path/to/passwd`:
-
 ```
 chmod 600 /path/to/passwd
 ```
 

@@ -71,19 +74,38 @@ s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd
 If you encounter any errors, enable debug output:
 
 ```
-s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd -d -d -f -o f2 -o curldbg
+s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd -o dbglevel=info -f -o curldbg
 ```
 
 You can also mount on boot by entering the following line to `/etc/fstab`:
 
 ```
 s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other 0 0
 ```
 
+or
+
+```
+mybucket /path/to/mountpoint fuse.s3fs _netdev,allow_other 0 0
+```
+
+If you use s3fs with a non-Amazon S3 implementation, specify the URL and path-style requests:
+
+```
+s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd -o url=http://url.to.s3/ -o use_path_request_style
+```
+
+or(fstab)
+```
+s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other,use_path_request_style,url=http://url.to.s3/ 0 0
+```
+
+To use IBM IAM Authentication, use the `-o ibm_iam_auth` option, and specify the Service Instance ID and API Key in your credentials file:
+```
+echo SERVICEINSTANCEID:APIKEY > /path/to/passwd
+```
+The Service Instance ID is only required when using the `-o create_bucket` option.
+
 Note: You may also want to create the global credential file first

@@ -126,3 +148,4 @@ License
 Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>
 
 Licensed under the GNU GPL version 2
+
configure.ac

@@ -20,7 +20,7 @@
 dnl Process this file with autoconf to produce a configure script.
 
 AC_PREREQ(2.59)
-AC_INIT(s3fs, 1.81)
+AC_INIT(s3fs, 1.83)
 AC_CONFIG_HEADER([config.h])
 
 AC_CANONICAL_SYSTEM

@@ -176,13 +176,13 @@ dnl
 dnl For PKG_CONFIG before checking nss/gnutls.
 dnl this is redundant checking, but we need checking before following.
 dnl
-PKG_CHECK_MODULES([common_lib_checking], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6])
+PKG_CHECK_MODULES([common_lib_checking], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 ])
 
 AC_MSG_CHECKING([compile s3fs with])
 case "${auth_lib}" in
 openssl)
   AC_MSG_RESULT(OpenSSL)
-  PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9])
+  PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])
   ;;
 gnutls)
   AC_MSG_RESULT(GnuTLS-gcrypt)
doc/man/s3fs.1

@@ -190,9 +190,6 @@ If the disk free space is smaller than this value, s3fs do not use diskspace as
 .TP
 \fB\-o\fR url (default="https://s3.amazonaws.com")
 sets the url to use to access Amazon S3. If you want to use HTTP, then you can set "url=http://s3.amazonaws.com".
-If you start s3fs without specifying the url option, s3fs will check the bucket using https://s3.amazonaws.com.
-And when bucket check fails, s3fs retries the bucket check using http://s3.amazonaws.com.
-This is the function left behind for backward compatibility.
 If you do not use https, please specify the URL with the url option.
 .TP
 \fB\-o\fR endpoint (default="us-east-1")

@@ -219,9 +216,15 @@
 Enable to send "Content-MD5" header when uploading a object without multipart post.
 If this option is enabled, it has some influences on a performance of s3fs when uploading small object.
 Because s3fs always checks MD5 when uploading large object, this option does not affect on large object.
 .TP
+\fB\-o\fR ecs ( default is disable )
+This option instructs s3fs to query the ECS container credential metadata address instead of the instance metadata address.
+.TP
 \fB\-o\fR iam_role ( default is no IAM role )
 This option requires the IAM role name or "auto". If you specify "auto", s3fs will automatically use the IAM role names that are set to an instance. If you specify this option without any argument, it is the same as that you have specified the "auto".
 .TP
+\fB\-o\fR ibm_iam_auth ( default is not using IBM IAM authentication )
+This option instructs s3fs to use IBM IAM authentication. In this mode, the AWSAccessKey and AWSSecretKey will be used as IBM's Service-Instance-ID and APIKey, respectively.
+.TP
 \fB\-o\fR use_xattr ( default is not handling the extended attribute )
 Enable to handle the extended attribute(xattrs).
 If you set this option, you can use the extended attribute.
src/cache.cpp

@@ -59,7 +59,7 @@ using namespace std;
 #ifdef HAVE_CLOCK_GETTIME
 static int s3fs_clock_gettime(int clk_id, struct timespec* ts)
 {
-  return clock_gettime(clk_id, ts);
+  return clock_gettime(static_cast<clockid_t>(clk_id), ts);
 }
 #else
 static int s3fs_clock_gettime(int clk_id, struct timespec* ts)
src/common.h (10 changes)

@@ -37,7 +37,7 @@
 //
 // Macro
 //
-#define SAFESTRPTR(strptr) (strptr ? strptr : "")
+static inline const char *SAFESTRPTR(const char *strptr) { return strptr ? strptr : ""; }
 
 //
 // Debug level

@@ -100,6 +100,14 @@ enum s3fs_log_level{
           syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "s3fs: " fmt "%s", __VA_ARGS__); \
         }
 
+// Special macro for init message
+#define S3FS_PRN_INIT_INFO(fmt, ...) \
+        if(foreground){ \
+          fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(S3FS_LOG_INFO), S3FS_LOG_NEST(0), __FILE__, __func__, __LINE__, __VA_ARGS__, ""); \
+        }else{ \
+          syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_INFO), "%s" fmt "%s", S3FS_LOG_NEST(0), __VA_ARGS__, ""); \
+        }
+
 // [NOTE]
 // small trick for VA_ARGS
 //
src/curl.cpp (694 changes)

File diff suppressed because it is too large.
src/curl.h (40 changes)

@@ -26,7 +26,7 @@
 //----------------------------------------------
 // Symbols
 //----------------------------------------------
-#define MIN_MULTIPART_SIZE 5242880 // 5MB
+static const int MIN_MULTIPART_SIZE = 5 * 1024 * 1024;
 
 //----------------------------------------------
 // class BodyData

@@ -175,9 +175,11 @@ enum sse_type_t {
 };
 
 // share
-#define SHARE_MUTEX_DNS         0
-#define SHARE_MUTEX_SSL_SESSION 1
-#define SHARE_MUTEX_MAX         2
+enum {
+  SHARE_MUTEX_DNS         = 0,
+  SHARE_MUTEX_SSL_SESSION = 1,
+  SHARE_MUTEX_MAX         = 2,
+};
 
 // Class for lapping curl
 //

@@ -230,12 +232,19 @@ class S3fsCurl
     static std::string AWSSecretAccessKey;
    static std::string AWSAccessToken;
     static time_t AWSAccessTokenExpire;
+    static bool is_ecs;
+    static bool is_ibm_iam_auth;
+    static std::string IAM_cred_url;
+    static size_t IAM_field_count;
+    static std::string IAM_token_field;
+    static std::string IAM_expiry_field;
     static std::string IAM_role;
     static long ssl_verify_hostname;
     static curltime_t curl_times;
     static curlprogress_t curl_progress;
     static std::string curl_ca_bundle;
     static mimes_t mimeTypes;
+    static std::string userAgent;
     static int max_parallel_cnt;
     static off_t multipart_size;
     static bool is_sigv4;

@@ -266,6 +275,8 @@ class S3fsCurl
    int b_ssekey_pos;          // backup for retrying
     std::string b_ssevalue;    // backup for retrying
     sse_type_t b_ssetype;      // backup for retrying
+    std::string op;            // the HTTP verb of the request ("PUT", "GET", etc.)
+    std::string query_string;  // request query string
 
   public:
     // constructor/destructor

@@ -311,7 +322,10 @@ class S3fsCurl
     bool ResetHandle(void);
     bool RemakeHandle(void);
     bool ClearInternalData(void);
-    void insertV4Headers(const std::string &op, const std::string &path, const std::string &query_string, const std::string &payload_hash);
+    void insertV4Headers();
+    void insertV2Headers();
+    void insertIBMIAMHeaders();
+    void insertAuthHeaders();
     std::string CalcSignatureV2(const std::string& method, const std::string& strMD5, const std::string& content_type, const std::string& date, const std::string& resource);
     std::string CalcSignature(const std::string& method, const std::string& canonical_uri, const std::string& query_string, const std::string& strdate, const std::string& payload_hash, const std::string& date8601);
     bool GetUploadId(std::string& upload_id);

@@ -341,6 +355,7 @@ class S3fsCurl
     static bool SetPublicBucket(bool flag);
     static bool IsPublicBucket(void) { return S3fsCurl::is_public_bucket; }
     static std::string SetDefaultAcl(const char* acl);
+    static std::string GetDefaultAcl();
     static storage_class_t SetStorageClass(storage_class_t storage_class);
     static storage_class_t GetStorageClass() { return S3fsCurl::storage_class; }
     static bool LoadEnvSse(void) { return (S3fsCurl::LoadEnvSseCKeys() && S3fsCurl::LoadEnvSseKmsid()); }

@@ -362,13 +377,22 @@ class S3fsCurl
     static bool SetVerbose(bool flag);
     static bool GetVerbose(void) { return S3fsCurl::is_verbose; }
     static bool SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey);
-    static bool IsSetAccessKeyId(void){
-      return (0 < S3fsCurl::IAM_role.size() || (0 < S3fsCurl::AWSAccessKeyId.size() && 0 < S3fsCurl::AWSSecretAccessKey.size()));
+    static bool IsSetAccessKeyID(void){
+      return (0 < S3fsCurl::AWSAccessKeyId.size());
+    }
+    static bool IsSetAccessKeys(void){
+      return (0 < S3fsCurl::IAM_role.size() || ((0 < S3fsCurl::AWSAccessKeyId.size() || S3fsCurl::is_ibm_iam_auth) && 0 < S3fsCurl::AWSSecretAccessKey.size()));
     }
     static long SetSslVerifyHostname(long value);
     static long GetSslVerifyHostname(void) { return S3fsCurl::ssl_verify_hostname; }
     static int SetMaxParallelCount(int value);
     static int GetMaxParallelCount(void) { return S3fsCurl::max_parallel_cnt; }
+    static bool SetIsECS(bool flag);
+    static bool SetIsIBMIAMAuth(bool flag);
+    static size_t SetIAMFieldCount(size_t field_count);
+    static std::string SetIAMCredentialsURL(const char* url);
+    static std::string SetIAMTokenField(const char* token_field);
+    static std::string SetIAMExpiryField(const char* expiry_field);
     static std::string SetIAMRole(const char* role);
     static const char* GetIAMRole(void) { return S3fsCurl::IAM_role.c_str(); }
     static bool SetMultipartSize(off_t size);

@@ -377,6 +401,7 @@ class S3fsCurl
     static bool IsSignatureV4(void) { return S3fsCurl::is_sigv4; }
     static bool SetUserAgentFlag(bool isset) { bool bresult = S3fsCurl::is_ua; S3fsCurl::is_ua = isset; return bresult; }
     static bool IsUserAgentFlag(void) { return S3fsCurl::is_ua; }
+    static void InitUserAgent(void);
 
     // methods
     bool CreateCurlHandle(bool force = false);

@@ -479,6 +504,7 @@ struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* d
 struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value);
 std::string get_sorted_header_keys(const struct curl_slist* list);
 std::string get_canonical_headers(const struct curl_slist* list, bool only_amz = false);
+std::string get_header_value(const struct curl_slist* list, const std::string &key);
 bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url);
 std::string prepare_url(const char* url);
 bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& ssevalue);   // implement in s3fs.cpp
src/fdcache.cpp

@@ -52,7 +52,7 @@ using namespace std;
 //------------------------------------------------
 // Symbols
 //------------------------------------------------
-#define MAX_MULTIPART_CNT 10000                 // S3 multipart max count
+static const int MAX_MULTIPART_CNT = 10 * 1000; // S3 multipart max count
 
 //
 // For cache directory top path

@@ -756,20 +756,24 @@ int FdEntity::OpenMirrorFile(void)
     return -EIO;
   }
 
-  // make mirror file path
-  char szfile[NAME_MAX + 1];
-  if(NULL == tmpnam(szfile)){
-    S3FS_PRN_ERR("could not get temporary file name.");
-    return -EIO;
-  }
-  char* ppos = strrchr(szfile, '/');
-  ++ppos;
-  mirrorpath = bupdir + "/" + ppos;
-
-  // link mirror file to cache file
-  if(-1 == link(cachepath.c_str(), mirrorpath.c_str())){
-    S3FS_PRN_ERR("could not link mirror file(%s) to cache file(%s) by errno(%d).", mirrorpath.c_str(), cachepath.c_str(), errno);
-    return -errno;
+  // try to link mirror file
+  while(true){
+    // make random(temp) file path
+    // (do not care for threading, because allowed any value returned.)
+    //
+    char         szfile[NAME_MAX + 1];
+    unsigned int seed = static_cast<unsigned int>(time(NULL));
+    sprintf(szfile, "%x.tmp", rand_r(&seed));
+    mirrorpath = bupdir + "/" + szfile;
+
+    // link mirror file to cache file
+    if(0 == link(cachepath.c_str(), mirrorpath.c_str())){
+      break;
+    }
+    if(EEXIST != errno){
+      S3FS_PRN_ERR("could not link mirror file(%s) to cache file(%s) by errno(%d).", mirrorpath.c_str(), cachepath.c_str(), errno);
+      return -errno;
+    }
   }
 
   // open mirror file

@@ -997,10 +1001,10 @@ bool FdEntity::OpenAndLoadAll(headers_t* pmeta, size_t* size, bool force_load)
 
 bool FdEntity::GetStats(struct stat& st)
 {
+  AutoLock auto_lock(&fdent_lock);
   if(-1 == fd){
     return false;
   }
-  AutoLock auto_lock(&fdent_lock);
 
   memset(&st, 0, sizeof(struct stat));
   if(-1 == fstat(fd, &st)){

@@ -1017,9 +1021,9 @@ int FdEntity::SetMtime(time_t time)
   if(-1 == time){
     return 0;
   }
-  if(-1 != fd){
-    AutoLock auto_lock(&fdent_lock);
 
+  AutoLock auto_lock(&fdent_lock);
+  if(-1 != fd){
     struct timeval tv[2];
     tv[0].tv_sec = time;
     tv[0].tv_usec= 0L;

@@ -1046,6 +1050,7 @@ int FdEntity::SetMtime(time_t time)
 
 bool FdEntity::UpdateMtime(void)
 {
+  AutoLock auto_lock(&fdent_lock);
   struct stat st;
   if(!GetStats(st)){
     return false;

@@ -1067,18 +1072,21 @@ bool FdEntity::GetSize(size_t& size)
 
 bool FdEntity::SetMode(mode_t mode)
 {
+  AutoLock auto_lock(&fdent_lock);
   orgmeta["x-amz-meta-mode"] = str(mode);
   return true;
 }
 
 bool FdEntity::SetUId(uid_t uid)
 {
+  AutoLock auto_lock(&fdent_lock);
   orgmeta["x-amz-meta-uid"] = str(uid);
   return true;
 }
 
 bool FdEntity::SetGId(gid_t gid)
 {
+  AutoLock auto_lock(&fdent_lock);
   orgmeta["x-amz-meta-gid"] = str(gid);
   return true;
 }

@@ -1088,6 +1096,7 @@ bool FdEntity::SetContentType(const char* path)
   if(!path){
     return false;
   }
+  AutoLock auto_lock(&fdent_lock);
   orgmeta["Content-Type"] = S3fsCurl::LookupMimeType(string(path));
   return true;
 }

@@ -1146,7 +1155,7 @@ int FdEntity::Load(off_t start, size_t size)
         size_t over_size = (*iter)->bytes - need_load_size;
 
         // download
-        if(static_cast<size_t>(2 * S3fsCurl::GetMultipartSize()) < need_load_size && !nomultipart){ // default 20MB
+        if(static_cast<size_t>(2 * S3fsCurl::GetMultipartSize()) <= need_load_size && !nomultipart){ // default 20MB
           // parallel request
           // Additional time is needed for large files
           time_t backup = 0;

@@ -1908,7 +1917,7 @@ size_t FdManager::SetEnsureFreeDiskSpace(size_t size)
   return old;
 }
 
-fsblkcnt_t FdManager::GetFreeDiskSpace(const char* path)
+uint64_t FdManager::GetFreeDiskSpace(const char* path)
 {
   struct statvfs vfsbuf;
   string ctoppath;

@@ -1930,12 +1939,12 @@ fsblkcnt_t FdManager::GetFreeDiskSpace(const char* path)
     S3FS_PRN_ERR("could not get vfs stat by errno(%d)", errno);
     return 0;
   }
-  return (vfsbuf.f_bavail * vfsbuf.f_bsize);
+  return (vfsbuf.f_bavail * vfsbuf.f_frsize);
 }
 
 bool FdManager::IsSafeDiskSpace(const char* path, size_t size)
 {
-  fsblkcnt_t fsize = FdManager::GetFreeDiskSpace(path);
+  uint64_t fsize = FdManager::GetFreeDiskSpace(path);
   return ((size + FdManager::GetEnsureFreeDiskSpace()) <= fsize);
 }

@@ -2107,6 +2116,7 @@ FdEntity* FdManager::ExistOpen(const char* path, int existfd, bool ignore_existf
 
 void FdManager::Rename(const std::string &from, const std::string &to)
 {
+  AutoLock auto_lock(&FdManager::fd_manager_lock);
   fdent_map_t::iterator iter = fent.find(from);
   if(fent.end() != iter){
     // found
src/fdcache.h

@@ -194,7 +194,7 @@ class FdManager
    fdent_map_t fent;
 
   private:
-    static fsblkcnt_t GetFreeDiskSpace(const char* path);
+    static uint64_t GetFreeDiskSpace(const char* path);
    void CleanupCacheDirInternal(const std::string &path = "");
 
   public:
src/gnutls_auth.cpp

@@ -186,11 +186,9 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
 //-------------------------------------------------------------------
 // Utility Function for MD5
 //-------------------------------------------------------------------
-#define MD5_DIGEST_LENGTH 16
-
 size_t get_md5_digest_length(void)
 {
-  return MD5_DIGEST_LENGTH;
+  return 16;
 }
 
 #ifdef USE_GNUTLS_NETTLE

@@ -298,11 +296,9 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
 //-------------------------------------------------------------------
 // Utility Function for SHA256
 //-------------------------------------------------------------------
-#define SHA256_DIGEST_LENGTH 32
-
 size_t get_sha256_digest_length(void)
 {
-  return SHA256_DIGEST_LENGTH;
+  return 32;
 }
 
 #ifdef USE_GNUTLS_NETTLE
555
src/s3fs.cpp
555
src/s3fs.cpp
@ -58,14 +58,16 @@ using namespace std;
|
||||
//-------------------------------------------------------------------
|
||||
// Define
|
||||
//-------------------------------------------------------------------
|
||||
#define DIRTYPE_UNKNOWN -1
|
||||
#define DIRTYPE_NEW 0
|
||||
#define DIRTYPE_OLD 1
|
||||
#define DIRTYPE_FOLDER 2
|
||||
#define DIRTYPE_NOOBJ 3
|
||||
enum dirtype {
|
||||
DIRTYPE_UNKNOWN = -1,
|
||||
DIRTYPE_NEW = 0,
|
||||
DIRTYPE_OLD = 1,
|
||||
DIRTYPE_FOLDER = 2,
|
||||
DIRTYPE_NOOBJ = 3,
|
||||
};
|
||||
|
||||
#define IS_REPLACEDIR(type) (DIRTYPE_OLD == type || DIRTYPE_FOLDER == type || DIRTYPE_NOOBJ == type)
|
||||
#define IS_RMTYPEDIR(type) (DIRTYPE_OLD == type || DIRTYPE_FOLDER == type)
|
||||
static bool IS_REPLACEDIR(dirtype type) { return DIRTYPE_OLD == type || DIRTYPE_FOLDER == type || DIRTYPE_NOOBJ == type; }
|
||||
static bool IS_RMTYPEDIR(dirtype type) { return DIRTYPE_OLD == type || DIRTYPE_FOLDER == type; }
|
||||
|
||||
#if !defined(ENOATTR)
|
||||
#define ENOATTR ENODATA
|
||||
@ -80,7 +82,10 @@ typedef struct incomplete_multipart_info{
|
||||
string date;
|
||||
}UNCOMP_MP_INFO;
|
||||
|
||||
typedef std::list<UNCOMP_MP_INFO> uncomp_mp_list_t;
|
||||
typedef std::list<UNCOMP_MP_INFO> uncomp_mp_list_t;
|
||||
typedef std::list<std::string> readline_t;
|
||||
typedef std::map<std::string, std::string> kvmap_t;
|
||||
typedef std::map<std::string, kvmap_t> bucketkvmap_t;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
@ -89,7 +94,6 @@ bool foreground = false;
|
||||
bool nomultipart = false;
|
||||
bool pathrequeststyle = false;
|
||||
bool complement_stat = false;
|
||||
bool is_changed_default_host = false; // for checking default http protocol
|
||||
std::string program_name;
|
||||
std::string service_path = "/";
|
||||
std::string host = "https://s3.amazonaws.com";
|
||||
@ -123,6 +127,8 @@ static bool is_s3fs_uid = false;// default does not set.
|
||||
static bool is_s3fs_gid = false;// default does not set.
|
||||
static bool is_s3fs_umask = false;// default does not set.
|
||||
static bool is_remove_cache = false;
|
||||
static bool is_ecs = false;
|
||||
static bool is_ibm_iam_auth = false;
|
||||
static bool is_use_xattr = false;
|
||||
static bool create_bucket = false;
|
||||
static int64_t singlepart_copy_limit = FIVE_GB;
|
||||
@ -130,6 +136,11 @@ static bool is_specified_endpoint = false;
|
||||
static int s3fs_init_deferred_exit_status = 0;
|
||||
static bool support_compat_dir = true;// default supports compatibility directory type
|
||||
|
||||
static const std::string allbucket_fields_type = ""; // special key for mapping(This name is absolutely not used as a bucket name)
|
||||
static const std::string keyval_fields_type = "\t"; // special key for mapping(This name is absolutely not used as a bucket name)
|
||||
static const std::string aws_accesskeyid = "AWSAccessKeyId";
|
||||
static const std::string aws_secretkey = "AWSSecretKey";
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Static functions : prototype
|
||||
//-------------------------------------------------------------------
|
||||
@ -138,7 +149,8 @@ static bool set_s3fs_usr2_handler(void);
|
||||
static s3fs_log_level set_s3fs_log_level(s3fs_log_level level);
|
||||
static s3fs_log_level bumpup_s3fs_log_level(void);
|
||||
static bool is_special_name_folder_object(const char* path);
|
||||
static int chk_dir_object_type(const char* path, string& newpath, string& nowpath, string& nowcache, headers_t* pmeta = NULL, int* pDirType = NULL);
|
||||
static int chk_dir_object_type(const char* path, string& newpath, string& nowpath, string& nowcache, headers_t* pmeta = NULL, dirtype* pDirType = NULL);
|
||||
static int remove_old_type_dir(const string& path, dirtype type);
|
||||
static int get_object_attribute(const char* path, struct stat* pstbuf, headers_t* pmeta = NULL, bool overcheck = true, bool* pisforce = NULL, bool add_no_truncate_cache = false);
|
||||
static int check_object_access(const char* path, int mask, struct stat* pstbuf);
|
||||
static int check_object_owner(const char* path, struct stat* pstbuf);
|
||||
@ -176,8 +188,9 @@ static bool parse_xattr_keyval(const std::string& xattrpair, string& key, PXATTR
|
||||
static size_t parse_xattrs(const std::string& strxattrs, xattrs_t& xattrs);
|
||||
static std::string build_xattrs(const xattrs_t& xattrs);
|
||||
static int s3fs_utility_mode(void);
|
||||
static int s3fs_check_service(bool need_try_http = true);
|
||||
static int check_for_aws_format(void);
|
||||
static int s3fs_check_service(void);
|
||||
static int parse_passwd_file(bucketkvmap_t& resmap);
|
||||
static int check_for_aws_format(const kvmap_t& kvmap);
|
||||
static int check_passwd_file_perms(void);
|
||||
static int read_passwd_file(void);
|
||||
static int get_access_keys(void);
|
||||
@ -314,12 +327,12 @@ static bool is_special_name_folder_object(const char* path)
|
||||
// pmeta: headers map
|
||||
// pDirType: directory object type
|
||||
//
|
||||
static int chk_dir_object_type(const char* path, string& newpath, string& nowpath, string& nowcache, headers_t* pmeta, int* pDirType)
|
||||
static int chk_dir_object_type(const char* path, string& newpath, string& nowpath, string& nowcache, headers_t* pmeta, dirtype* pDirType)
|
||||
{
|
||||
int TypeTmp;
|
||||
dirtype TypeTmp;
|
||||
int result = -1;
|
||||
bool isforce = false;
|
||||
int* pType = pDirType ? pDirType : &TypeTmp;
|
||||
dirtype* pType = pDirType ? pDirType : &TypeTmp;
|
||||
|
||||
// Normalize new path.
|
||||
newpath = path;
|
||||
@ -392,6 +405,21 @@ static int chk_dir_object_type(const char* path, string& newpath, string& nowpat
|
||||
return result;
|
||||
}
|
||||
|
||||
static int remove_old_type_dir(const string& path, dirtype type)
|
||||
{
|
||||
if(IS_RMTYPEDIR(type)){
|
||||
S3fsCurl s3fscurl;
|
||||
int result = s3fscurl.DeleteRequest(path.c_str());
|
||||
if(0 != result && -ENOENT != result){
|
||||
return result;
|
||||
}
|
||||
// succeed removing or not found the directory
|
||||
}else{
|
||||
// nothing to do
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
//
|
||||
// Get object attributes with stat cache.
|
||||
// This function is base for s3fs_getattr().
|
||||
@ -1339,7 +1367,7 @@ static int rename_directory(const char* from, const char* to)
|
||||
string basepath = strfrom + "/";
|
||||
string newpath; // should be from name(not used)
|
||||
string nowcache; // now cache path(not used)
|
||||
int DirType;
|
||||
dirtype DirType;
|
||||
bool normdir;
|
||||
MVNODE* mn_head = NULL;
|
||||
MVNODE* mn_tail = NULL;
|
||||
@ -1517,7 +1545,7 @@ static int s3fs_chmod(const char* path, mode_t mode)
|
||||
string nowcache;
|
||||
headers_t meta;
|
||||
struct stat stbuf;
|
||||
int nDirType = DIRTYPE_UNKNOWN;
|
||||
dirtype nDirType = DIRTYPE_UNKNOWN;
|
||||
|
||||
S3FS_PRN_INFO("[path=%s][mode=%04o]", path, mode);
|
||||
|
||||
@ -1548,11 +1576,8 @@ static int s3fs_chmod(const char* path, mode_t mode)
|
||||
// Need to remove old dir("dir" etc) and make new dir("dir/")
|
||||
|
||||
// At first, remove directory old object
|
||||
if(IS_RMTYPEDIR(nDirType)){
|
||||
S3fsCurl s3fscurl;
|
||||
if(0 != (result = s3fscurl.DeleteRequest(strpath.c_str()))){
|
||||
return result;
|
||||
}
|
||||
if(0 != (result = remove_old_type_dir(strpath, nDirType))){
|
||||
return result;
|
||||
}
|
||||
StatCache::getStatCacheData()->DelStat(nowcache);
|
||||
|
||||
@ -1594,7 +1619,7 @@ static int s3fs_chmod_nocopy(const char* path, mode_t mode)
|
||||
string newpath;
|
||||
string nowcache;
|
||||
struct stat stbuf;
|
||||
int nDirType = DIRTYPE_UNKNOWN;
|
||||
dirtype nDirType = DIRTYPE_UNKNOWN;
|
||||
|
||||
S3FS_PRN_INFO1("[path=%s][mode=%04o]", path, mode);
|
||||
|
||||
@ -1624,13 +1649,10 @@ static int s3fs_chmod_nocopy(const char* path, mode_t mode)
|
||||
if(S_ISDIR(stbuf.st_mode)){
|
||||
// Should rebuild all directory object
|
||||
// Need to remove old dir("dir" etc) and make new dir("dir/")
|
||||
|
||||
|
||||
// At first, remove directory old object
|
||||
if(IS_RMTYPEDIR(nDirType)){
|
||||
S3fsCurl s3fscurl;
|
||||
if(0 != (result = s3fscurl.DeleteRequest(strpath.c_str()))){
|
||||
return result;
|
||||
}
|
||||
if(0 != (result = remove_old_type_dir(strpath, nDirType))){
|
||||
return result;
|
||||
}
|
||||
StatCache::getStatCacheData()->DelStat(nowcache);
|
||||
|
||||
@ -1674,7 +1696,7 @@ static int s3fs_chown(const char* path, uid_t uid, gid_t gid)
|
||||
string nowcache;
|
||||
headers_t meta;
|
||||
struct stat stbuf;
|
||||
int nDirType = DIRTYPE_UNKNOWN;
|
||||
dirtype nDirType = DIRTYPE_UNKNOWN;
|
||||
|
||||
S3FS_PRN_INFO("[path=%s][uid=%u][gid=%u]", path, (unsigned int)uid, (unsigned int)gid);
|
||||
|
||||
@ -1706,25 +1728,13 @@ static int s3fs_chown(const char* path, uid_t uid, gid_t gid)
|
||||
return result;
|
||||
}
|
||||
|
||||
struct passwd* pwdata= getpwuid(uid);
|
||||
struct group* grdata = getgrgid(gid);
|
||||
if(pwdata){
|
||||
uid = pwdata->pw_uid;
|
||||
}
|
||||
if(grdata){
|
||||
gid = grdata->gr_gid;
|
||||
}
|
||||
|
||||
if(S_ISDIR(stbuf.st_mode) && IS_REPLACEDIR(nDirType)){
|
||||
// Should rebuild directory object(except new type)
|
||||
// Need to remove old dir("dir" etc) and make new dir("dir/")
|
||||
|
||||
// At first, remove directory old object
|
||||
if(IS_RMTYPEDIR(nDirType)){
|
||||
S3fsCurl s3fscurl;
|
||||
if(0 != (result = s3fscurl.DeleteRequest(strpath.c_str()))){
|
||||
return result;
|
||||
}
|
||||
if(0 != (result = remove_old_type_dir(strpath, nDirType))){
|
||||
return result;
|
||||
}
|
||||
StatCache::getStatCacheData()->DelStat(nowcache);
|
||||
|
||||
@ -1755,7 +1765,7 @@ static int s3fs_chown_nocopy(const char* path, uid_t uid, gid_t gid)
|
||||
string newpath;
|
||||
string nowcache;
|
||||
struct stat stbuf;
|
||||
int nDirType = DIRTYPE_UNKNOWN;
|
||||
dirtype nDirType = DIRTYPE_UNKNOWN;
|
||||
|
||||
S3FS_PRN_INFO1("[path=%s][uid=%u][gid=%u]", path, (unsigned int)uid, (unsigned int)gid);
|
||||
|
||||
@ -1770,6 +1780,13 @@ static int s3fs_chown_nocopy(const char* path, uid_t uid, gid_t gid)
|
||||
return result;
|
||||
}
|
||||
|
||||
if((uid_t)(-1) == uid){
|
||||
uid = stbuf.st_uid;
|
||||
}
|
||||
if((gid_t)(-1) == gid){
|
||||
gid = stbuf.st_gid;
|
||||
}
|
||||
|
||||
// Get attributes
|
||||
if(S_ISDIR(stbuf.st_mode)){
|
||||
result = chk_dir_object_type(path, newpath, strpath, nowcache, NULL, &nDirType);
|
||||
@ -1782,25 +1799,13 @@ static int s3fs_chown_nocopy(const char* path, uid_t uid, gid_t gid)
|
||||
return result;
|
||||
}
|
||||
|
||||
struct passwd* pwdata= getpwuid(uid);
|
||||
struct group* grdata = getgrgid(gid);
|
||||
if(pwdata){
|
||||
uid = pwdata->pw_uid;
|
||||
}
|
||||
if(grdata){
|
||||
gid = grdata->gr_gid;
|
||||
}
|
||||
|
||||
if(S_ISDIR(stbuf.st_mode)){
|
||||
// Should rebuild all directory object
|
||||
// Need to remove old dir("dir" etc) and make new dir("dir/")
|
||||
|
||||
// At first, remove directory old object
|
||||
if(IS_RMTYPEDIR(nDirType)){
|
||||
S3fsCurl s3fscurl;
|
||||
if(0 != (result = s3fscurl.DeleteRequest(strpath.c_str()))){
|
||||
return result;
|
||||
}
|
||||
if(0 != (result = remove_old_type_dir(strpath, nDirType))){
|
||||
return result;
|
||||
}
|
||||
StatCache::getStatCacheData()->DelStat(nowcache);
|
||||
|
||||
@ -1845,7 +1850,7 @@ static int s3fs_utimens(const char* path, const struct timespec ts[2])
|
||||
string nowcache;
|
||||
headers_t meta;
|
||||
struct stat stbuf;
|
||||
int nDirType = DIRTYPE_UNKNOWN;
|
||||
dirtype nDirType = DIRTYPE_UNKNOWN;
|
||||
|
||||
S3FS_PRN_INFO("[path=%s][mtime=%jd]", path, (intmax_t)(ts[1].tv_sec));
|
||||
|
||||
@ -1878,11 +1883,8 @@ static int s3fs_utimens(const char* path, const struct timespec ts[2])
|
||||
// Need to remove old dir("dir" etc) and make new dir("dir/")
|
||||
|
||||
// At first, remove directory old object
|
||||
if(IS_RMTYPEDIR(nDirType)){
|
||||
S3fsCurl s3fscurl;
|
||||
if(0 != (result = s3fscurl.DeleteRequest(strpath.c_str()))){
|
||||
return result;
|
||||
}
|
||||
if(0 != (result = remove_old_type_dir(strpath, nDirType))){
|
||||
return result;
|
||||
}
|
||||
StatCache::getStatCacheData()->DelStat(nowcache);
|
||||
|
||||
@ -1912,7 +1914,7 @@ static int s3fs_utimens_nocopy(const char* path, const struct timespec ts[2])
|
||||
string newpath;
|
||||
string nowcache;
|
||||
struct stat stbuf;
|
||||
int nDirType = DIRTYPE_UNKNOWN;
|
||||
dirtype nDirType = DIRTYPE_UNKNOWN;
|
||||
|
||||
S3FS_PRN_INFO1("[path=%s][mtime=%s]", path, str(ts[1].tv_sec).c_str());
|
||||
|
||||
@ -1946,11 +1948,8 @@ static int s3fs_utimens_nocopy(const char* path, const struct timespec ts[2])
|
||||
// Need to remove old dir("dir" etc) and make new dir("dir/")
|
||||
|
||||
// At first, remove directory old object
|
||||
if(IS_RMTYPEDIR(nDirType)){
|
||||
S3fsCurl s3fscurl;
|
||||
if(0 != (result = s3fscurl.DeleteRequest(strpath.c_str()))){
|
||||
return result;
|
||||
}
|
||||
if(0 != (result = remove_old_type_dir(strpath, nDirType))){
|
||||
return result;
|
||||
}
|
||||
StatCache::getStatCacheData()->DelStat(nowcache);
|
||||
|
||||
@ -3044,7 +3043,7 @@ static int s3fs_setxattr(const char* path, const char* name, const char* value,
|
||||
string nowcache;
|
||||
headers_t meta;
|
||||
struct stat stbuf;
|
||||
int nDirType = DIRTYPE_UNKNOWN;
|
||||
dirtype nDirType = DIRTYPE_UNKNOWN;
|
||||
|
||||
if(0 == strcmp(path, "/")){
|
||||
S3FS_PRN_ERR("Could not change mode for mount point.");
|
||||
@ -3078,11 +3077,8 @@ static int s3fs_setxattr(const char* path, const char* name, const char* value,
|
||||
// Need to remove old dir("dir" etc) and make new dir("dir/")
|
||||
|
||||
// At first, remove directory old object
|
||||
if(IS_RMTYPEDIR(nDirType)){
|
||||
S3fsCurl s3fscurl;
|
||||
if(0 != (result = s3fscurl.DeleteRequest(strpath.c_str()))){
|
||||
return result;
|
||||
}
|
||||
if(0 != (result = remove_old_type_dir(strpath, nDirType))){
|
||||
return result;
|
||||
}
|
||||
StatCache::getStatCacheData()->DelStat(nowcache);
|
||||
|
||||
@ -3266,7 +3262,7 @@ static int s3fs_removexattr(const char* path, const char* name)
|
||||
headers_t meta;
|
||||
xattrs_t xattrs;
|
||||
struct stat stbuf;
|
||||
int nDirType = DIRTYPE_UNKNOWN;
|
||||
dirtype nDirType = DIRTYPE_UNKNOWN;
|
||||
|
||||
if(0 == strcmp(path, "/")){
|
||||
S3FS_PRN_ERR("Could not change mode for mount point.");
|
||||
@ -3326,12 +3322,8 @@ static int s3fs_removexattr(const char* path, const char* name)
|
||||
// Need to remove old dir("dir" etc) and make new dir("dir/")
|
||||
|
||||
// At first, remove directory old object
|
||||
if(IS_RMTYPEDIR(nDirType)){
|
||||
S3fsCurl s3fscurl;
|
||||
if(0 != (result = s3fscurl.DeleteRequest(strpath.c_str()))){
|
||||
free_xattrs(xattrs);
|
||||
return result;
|
||||
}
|
||||
if(0 != (result = remove_old_type_dir(strpath, nDirType))){
|
||||
return result;
|
||||
}
|
||||
StatCache::getStatCacheData()->DelStat(nowcache);
|
||||
|
||||
@ -3376,7 +3368,7 @@ static void s3fs_exit_fuseloop(int exit_status) {
|
||||
|
||||
static void* s3fs_init(struct fuse_conn_info* conn)
|
||||
{
|
||||
S3FS_PRN_CRIT("init v%s(commit:%s) with %s", VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name());
|
||||
S3FS_PRN_INIT_INFO("init v%s(commit:%s) with %s", VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name());
|
||||
|
||||
// cache(remove cache dirs at first)
|
||||
if(is_remove_cache && (!CacheFileStat::DeleteCacheFileStatDirectory() || !FdManager::DeleteCacheDirectory())){
|
||||
@ -3750,7 +3742,7 @@ static bool check_region_error(const char* pbody, string& expectregion)
|
||||
return true;
|
||||
}
|
||||
|
||||
static int s3fs_check_service(bool need_try_http)
|
||||
static int s3fs_check_service(void)
|
||||
{
|
||||
S3FS_PRN_INFO("check services.");
|
||||
|
||||
@ -3762,7 +3754,6 @@ static int s3fs_check_service(bool need_try_http)
|
||||
|
||||
S3fsCurl s3fscurl;
|
||||
int res;
|
||||
string bup_endpoint = endpoint;
|
||||
if(0 > (res = s3fscurl.CheckBucket())){
|
||||
// get response code
|
||||
long responseCode = s3fscurl.GetLastResponseCode();
|
||||
@ -3821,26 +3812,7 @@ static int s3fs_check_service(bool need_try_http)
|
||||
// another error
|
||||
S3FS_PRN_CRIT("unable to connect(host=%s) - result of checking service.", host.c_str());
|
||||
}
|
||||
|
||||
// [NOTE]
|
||||
// If using default host(https://s3.amazonaws.com), try to change https to http protocol
|
||||
// (old version default protocol) and try to connect.
|
||||
//
|
||||
if(need_try_http && !is_changed_default_host){
|
||||
// host is set second default value(http)
|
||||
S3FS_PRN_CRIT("Retry checking the bucket with HTTP(http://s3.amazonaws.com), not HTTPS(https://s3.amazonaws.com).");
|
||||
host = "http://s3.amazonaws.com";
|
||||
endpoint = bup_endpoint;
|
||||
|
||||
// check http protocol
|
||||
int result = s3fs_check_service(false);
|
||||
if(EXIT_SUCCESS == result){
|
||||
S3FS_PRN_CRIT("Switch to HTTP protocol instead of HTTPS. You should use the host or url option and specify the HTTP protocol endpoint.");
|
||||
}
|
||||
return result;
|
||||
}else{
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
}
|
||||
|
||||
@ -3856,77 +3828,136 @@ static int s3fs_check_service(bool need_try_http)
|
||||
return EXIT_SUCCESS;
|
||||
}
|
||||
|
||||
//
|
||||
// Read and Parse passwd file
|
||||
//
|
||||
// The line of the password file is one of the following formats:
|
||||
// (1) "accesskey:secretkey" : AWS format for default(all) access key/secret key
|
||||
// (2) "bucket:accesskey:secretkey" : AWS format for bucket's access key/secret key
|
||||
// (3) "key=value" : Content-dependent KeyValue contents
|
||||
//
|
||||
// This function sets result into bucketkvmap_t, it bucket name and key&value mapping.
|
||||
// If bucket name is empty(1 or 3 format), bucket name for mapping is set "\t" or "".
|
||||
//
|
||||
// Return: 1 - OK(could parse and set mapping etc.)
|
||||
// 0 - NG(could not read any value)
|
||||
// -1 - Should shutdown immediately
|
||||
//
|
||||
static int parse_passwd_file(bucketkvmap_t& resmap)
|
||||
{
|
||||
string line;
|
||||
size_t first_pos;
|
||||
size_t last_pos;
|
||||
readline_t linelist;
|
||||
readline_t::iterator iter;
|
||||
|
||||
// open passwd file
|
||||
ifstream PF(passwd_file.c_str());
|
||||
if(!PF.good()){
|
||||
S3FS_PRN_EXIT("could not open passwd file : %s", passwd_file.c_str());
|
||||
return -1;
|
||||
}
|
||||
|
||||
// read each line
|
||||
while(getline(PF, line)){
|
||||
line = trim(line);
|
||||
if(0 == line.size()){
|
||||
continue;
|
||||
}
|
||||
if('#' == line[0]){
|
||||
continue;
|
||||
}
|
||||
if(string::npos != line.find_first_of(" \t")){
|
||||
S3FS_PRN_EXIT("invalid line in passwd file, found whitespace character.");
|
||||
return -1;
|
||||
}
|
||||
if(0 == line.find_first_of("[")){
|
||||
S3FS_PRN_EXIT("invalid line in passwd file, found a bracket \"[\" character.");
|
||||
return -1;
|
||||
}
|
||||
linelist.push_back(line);
|
||||
}
|
||||
|
||||
// read '=' type
|
||||
kvmap_t kv;
|
||||
for(iter = linelist.begin(); iter != linelist.end(); ++iter){
|
||||
first_pos = iter->find_first_of("=");
|
||||
if(first_pos == string::npos){
|
||||
continue;
|
||||
}
|
||||
// formatted by "key=val"
|
||||
string key = trim(iter->substr(0, first_pos));
|
||||
string val = trim(iter->substr(first_pos + 1, string::npos));
|
||||
if(key.empty()){
|
||||
continue;
|
||||
}
|
||||
if(kv.end() != kv.find(key)){
|
||||
S3FS_PRN_WARN("same key name(%s) found in passwd file, skip this.", key.c_str());
|
||||
continue;
|
||||
}
|
||||
kv[key] = val;
|
||||
}
|
||||
// set special key name
|
||||
resmap[string(keyval_fields_type)] = kv;
|
||||
|
||||
// read ':' type
|
||||
for(iter = linelist.begin(); iter != linelist.end(); ++iter){
|
||||
first_pos = iter->find_first_of(":");
|
||||
last_pos = iter->find_last_of(":");
|
||||
if(first_pos == string::npos){
|
||||
continue;
|
||||
}
|
||||
string bucket;
|
||||
string accesskey;
|
||||
string secret;
|
||||
if(first_pos != last_pos){
|
||||
// formatted by "bucket:accesskey:secretkey"
|
||||
bucket = trim(iter->substr(0, first_pos));
|
||||
accesskey = trim(iter->substr(first_pos + 1, last_pos - first_pos - 1));
|
||||
secret = trim(iter->substr(last_pos + 1, string::npos));
|
||||
}else{
|
||||
// formatted by "accesskey:secretkey"
|
||||
bucket = allbucket_fields_type;
|
||||
accesskey = trim(iter->substr(0, first_pos));
|
||||
secret = trim(iter->substr(first_pos + 1, string::npos));
|
||||
}
|
||||
if(resmap.end() != resmap.find(bucket)){
|
||||
S3FS_PRN_EXIT("same bucket(%s) passwd setting found in passwd file.", ("" == bucket ? "default" : bucket.c_str()));
|
||||
return -1;
|
||||
}
|
||||
kv.clear();
|
||||
kv[string(aws_accesskeyid)] = accesskey;
|
||||
kv[string(aws_secretkey)] = secret;
|
||||
resmap[bucket] = kv;
|
||||
}
|
||||
return (resmap.empty() ? 0 : 1);
|
||||
}
|
||||
|
||||
//
|
||||
// Return: 1 - OK(could read and set accesskey etc.)
|
||||
// 0 - NG(could not read)
|
||||
// -1 - Should shutdown immediately
|
||||
static int check_for_aws_format(void)
|
||||
//
|
||||
static int check_for_aws_format(const kvmap_t& kvmap)
|
||||
{
|
||||
size_t first_pos = string::npos;
|
||||
string line;
|
||||
bool got_access_key_id_line = 0;
|
||||
bool got_secret_key_line = 0;
|
||||
string str1 ("AWSAccessKeyId=");
|
||||
string str2 ("AWSSecretKey=");
|
||||
size_t found;
|
||||
string AccessKeyId;
|
||||
string SecretAccesskey;
|
||||
string str1(aws_accesskeyid);
|
||||
string str2(aws_secretkey);
|
||||
|
||||
|
||||
ifstream PF(passwd_file.c_str());
|
||||
if(PF.good()){
|
||||
while (getline(PF, line)){
|
||||
if(line[0]=='#'){
|
||||
continue;
|
||||
}
|
||||
if(line.size() == 0){
|
||||
continue;
|
||||
}
|
||||
if('\r' == line[line.size() - 1]){
|
||||
line = line.substr(0, line.size() - 1);
|
||||
if(line.size() == 0){
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
first_pos = line.find_first_of(" \t");
|
||||
if(first_pos != string::npos){
|
||||
S3FS_PRN_EXIT("invalid line in passwd file, found whitespace character.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
first_pos = line.find_first_of("[");
|
||||
if(first_pos != string::npos && first_pos == 0){
|
||||
S3FS_PRN_EXIT("invalid line in passwd file, found a bracket \"[\" character.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
found = line.find(str1);
|
||||
if(found != string::npos){
|
||||
first_pos = line.find_first_of("=");
|
||||
AccessKeyId = line.substr(first_pos + 1, string::npos);
|
||||
got_access_key_id_line = 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
found = line.find(str2);
|
||||
if(found != string::npos){
|
||||
first_pos = line.find_first_of("=");
|
||||
SecretAccesskey = line.substr(first_pos + 1, string::npos);
|
||||
got_secret_key_line = 1;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if(got_access_key_id_line && got_secret_key_line){
|
||||
if(!S3fsCurl::SetAccessKey(AccessKeyId.c_str(), SecretAccesskey.c_str())){
|
||||
S3FS_PRN_EXIT("if one access key is specified, both keys need to be specified.");
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}else{
|
||||
return 0;
|
||||
if(kvmap.empty()){
|
||||
return 0;
|
||||
}
|
||||
if(kvmap.end() == kvmap.find(str1) && kvmap.end() == kvmap.find(str2)){
|
||||
return 0;
|
||||
}
|
||||
if(kvmap.end() == kvmap.find(str1) || kvmap.end() == kvmap.find(str2)){
|
||||
S3FS_PRN_EXIT("AWSAccesskey or AWSSecretkey is not specified.");
|
||||
return -1;
|
||||
}
|
||||
if(!S3fsCurl::SetAccessKey(kvmap.at(str1).c_str(), kvmap.at(str2).c_str())){
|
||||
S3FS_PRN_EXIT("failed to set access key/secret key.");
|
||||
return -1;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
//
|
||||
@ -3999,13 +4030,10 @@ static int check_passwd_file_perms(void)
|
||||
//
|
||||
static int read_passwd_file(void)
|
||||
{
|
||||
string line;
|
||||
string field1, field2, field3;
|
||||
size_t first_pos = string::npos;
|
||||
size_t last_pos = string::npos;
|
||||
bool default_found = 0;
|
||||
int aws_format;
|
||||
|
||||
bucketkvmap_t bucketmap;
|
||||
kvmap_t keyval;
|
||||
int result;
|
||||
|
||||
// if you got here, the password file
|
||||
// exists and is readable by the
|
||||
// current user, check for permissions
|
||||
@ -4013,80 +4041,44 @@ static int read_passwd_file(void)
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
    aws_format = check_for_aws_format();
    if(1 == aws_format){
        return EXIT_SUCCESS;
    }else if(-1 == aws_format){
    //
    // parse passwd file
    //
    result = parse_passwd_file(bucketmap);
    if(-1 == result){
        return EXIT_FAILURE;
    }

    ifstream PF(passwd_file.c_str());
    if(PF.good()){
        while (getline(PF, line)){
            if(line[0]=='#'){
                continue;
            }
            if(line.size() == 0){
                continue;
            }
            if('\r' == line[line.size() - 1]){
                line = line.substr(0, line.size() - 1);
                if(line.size() == 0){
                    continue;
                }
            }

            first_pos = line.find_first_of(" \t");
            if(first_pos != string::npos){
                S3FS_PRN_EXIT("invalid line in passwd file, found whitespace character.");
                return EXIT_FAILURE;
            }

            first_pos = line.find_first_of("[");
            if(first_pos != string::npos && first_pos == 0){
                S3FS_PRN_EXIT("invalid line in passwd file, found a bracket \"[\" character.");
                return EXIT_FAILURE;
            }

            first_pos = line.find_first_of(":");
            if(first_pos == string::npos){
                S3FS_PRN_EXIT("invalid line in passwd file, no \":\" separator found.");
                return EXIT_FAILURE;
            }
            last_pos = line.find_last_of(":");

            if(first_pos != last_pos){
                // bucket specified
                field1 = line.substr(0,first_pos);
                field2 = line.substr(first_pos + 1, last_pos - first_pos - 1);
                field3 = line.substr(last_pos + 1, string::npos);
            }else{
                // no bucket specified - original style - found default key
                if(default_found == 1){
                    S3FS_PRN_EXIT("more than one default key pair found in passwd file.");
                    return EXIT_FAILURE;
                }
                default_found = 1;
                field1.assign("");
                field2 = line.substr(0,first_pos);
                field3 = line.substr(first_pos + 1, string::npos);
                if(!S3fsCurl::SetAccessKey(field2.c_str(), field3.c_str())){
                    S3FS_PRN_EXIT("if one access key is specified, both keys need to be specified.");
                    return EXIT_FAILURE;
                }
            }

            // does the bucket we are mounting match this passwd file entry?
            // if so, use that key pair; otherwise the default key, if found,
            // will be used
            if(field1.size() != 0 && field1 == bucket){
                if(!S3fsCurl::SetAccessKey(field2.c_str(), field3.c_str())){
                    S3FS_PRN_EXIT("if one access key is specified, both keys need to be specified.");
                    return EXIT_FAILURE;
                }
                break;
            }

    //
    // check key=value type format.
    //
    if(bucketmap.end() != bucketmap.find(keyval_fields_type)){
        // aws format
        result = check_for_aws_format(bucketmap[keyval_fields_type]);
        if(-1 == result){
            return EXIT_FAILURE;
        }else if(1 == result){
            // successfully set
            return EXIT_SUCCESS;
        }
    }

    string bucket_key = allbucket_fields_type;
    if(0 < bucket.size() && bucketmap.end() != bucketmap.find(bucket)){
        bucket_key = bucket;
    }
    if(bucketmap.end() == bucketmap.find(bucket_key)){
        S3FS_PRN_EXIT("Not found access key/secret key in passwd file.");
        return EXIT_FAILURE;
    }
    keyval = bucketmap[bucket_key];
    if(keyval.end() == keyval.find(string(aws_accesskeyid)) || keyval.end() == keyval.find(string(aws_secretkey))){
        S3FS_PRN_EXIT("Not found access key/secret key in passwd file.");
        return EXIT_FAILURE;
    }
    if(!S3fsCurl::SetAccessKey(keyval.at(string(aws_accesskeyid)).c_str(), keyval.at(string(aws_secretkey)).c_str())){
        S3FS_PRN_EXIT("failed to set internal data for access key/secret key from passwd file.");
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}
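For reference, the colon-separated format that the removed loop parsed by hand, and that parse_passwd_file() now loads into bucketmap, looks roughly like this; the keys below are Amazon's documented placeholders, not values from this change:

```
# comments start with '#'; no spaces or tabs are allowed on a line
AKIAIOSFODNN7EXAMPLE:wJalrXUtnFEMI/K7MDENG/bPxRcfiCYEXAMPLEKEY
mybucket:AKIAI44QH8DHBEXAMPLE:je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY
```

A line with a single ':' is the default key pair (at most one is allowed); a line with two is bucket:accesskey:secretkey and is only used when it matches the bucket being mounted.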
@ -4114,12 +4106,12 @@ static int get_access_keys(void)
    }

    // access key loading is deferred
    if(load_iamrole){
    if(load_iamrole || is_ecs){
        return EXIT_SUCCESS;
    }

    // 1 - keys specified on the command line
    if(S3fsCurl::IsSetAccessKeyId()){
    if(S3fsCurl::IsSetAccessKeys()){
        return EXIT_SUCCESS;
    }

@ -4183,7 +4175,7 @@ static int get_access_keys(void)
    // It is possible that the user's file was there but
    // contained no key pairs i.e. commented out
    // in that case, go look in the final location
    if(S3fsCurl::IsSetAccessKeyId()){
    if(S3fsCurl::IsSetAccessKeys()){
        return EXIT_SUCCESS;
    }
}
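The repeated IsSetAccessKeyId() to IsSetAccessKeys() rename in this and the following hunks reads as a change from "is the key id set?" to "is the whole credential pair set?". The actual S3fsCurl members are not shown in this diff, so the following is only a hypothetical sketch of that distinction:

```
#include <string>

// Hypothetical sketch; the real S3fsCurl accessors may differ.
struct Credentials {
    std::string access_key_id;
    std::string secret_key;

    // old guard: only the id had to be present
    bool IsSetAccessKeyId() const { return !access_key_id.empty(); }

    // new guard: the pair must be complete before it counts as "keys are set"
    bool IsSetAccessKeys() const {
        return !access_key_id.empty() && !secret_key.empty();
    }
};
```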
@ -4549,7 +4541,31 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
        passwd_file = strchr(arg, '=') + sizeof(char);
        return 0;
    }
    if(0 == strcmp(arg, "ibm_iam_auth")){
        S3fsCurl::SetIsIBMIAMAuth(true);
        S3fsCurl::SetIAMCredentialsURL("https://iam.bluemix.net/oidc/token");
        S3fsCurl::SetIAMTokenField("access_token");
        S3fsCurl::SetIAMExpiryField("expiration");
        S3fsCurl::SetIAMFieldCount(2);
        is_ibm_iam_auth = true;
        return 0;
    }
    if(0 == strcmp(arg, "ecs")){
        if (is_ibm_iam_auth) {
            S3FS_PRN_EXIT("option ecs cannot be used in conjunction with ibm");
            return -1;
        }
        S3fsCurl::SetIsECS(true);
        S3fsCurl::SetIAMCredentialsURL("http://169.254.170.2");
        S3fsCurl::SetIAMFieldCount(5);
        is_ecs = true;
        return 0;
    }
    if(0 == STR2NCMP(arg, "iam_role")){
        if (is_ecs || is_ibm_iam_auth) {
            S3FS_PRN_EXIT("option iam_role cannot be used in conjunction with ecs or ibm");
            return -1;
        }
        if(0 == strcmp(arg, "iam_role") || 0 == strcmp(arg, "iam_role=auto")){
            // load the IAM role name in s3fs_init(), because we need to wait for curl to be initialized.
            //
@ -4589,7 +4605,6 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
    }
    if(0 == STR2NCMP(arg, "host=")){
        host = strchr(arg, '=') + sizeof(char);
        is_changed_default_host = true;
        return 0;
    }
    if(0 == STR2NCMP(arg, "servicepath=")){
@ -4710,7 +4725,6 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
    }
    if(0 == STR2NCMP(arg, "url=")){
        host = strchr(arg, '=') + sizeof(char);
        is_changed_default_host = true;
        // strip the trailing '/', if any, off the end of the host
        // string
        size_t found, length;
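The option handlers above all follow one pattern: strcmp() for exact flags (ecs, ibm_iam_auth), STR2NCMP() for key=value prefixes, and strchr(arg, '=') + 1 to pull out the value. A condensed, self-contained sketch of that pattern; the option names come from the hunks above, but the function names here are invented for illustration:

```
#include <cstring>
#include <string>

// "key=value" -> "value"; callers only use this after a prefix match,
// so '=' is known to be present.
static const char* opt_value(const char* arg) {
    return strchr(arg, '=') + 1;
}

// does str1 start with str2? (what the STR2NCMP helper computes)
static int str2ncmp(const char* str1, const char* str2) {
    return strncmp(str1, str2, strlen(str2));
}

static int dispatch(const char* arg) {
    if(0 == strcmp(arg, "ecs")){            // exact flag, no value
        return 0;
    }
    if(0 == str2ncmp(arg, "url=")){         // key=value option
        std::string url = opt_value(arg);   // e.g. "http://169.254.170.2"
        return url.empty() ? 1 : 0;
    }
    return 1;                               // unknown: pass through to FUSE
}
```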
@ -4943,19 +4957,19 @@ int main(int argc, char* argv[])
    }

    // error checking of command line arguments for compatibility
    if(S3fsCurl::IsPublicBucket() && S3fsCurl::IsSetAccessKeyId()){
    if(S3fsCurl::IsPublicBucket() && S3fsCurl::IsSetAccessKeys()){
        S3FS_PRN_EXIT("specifying both public_bucket and the access keys options is invalid.");
        exit(EXIT_FAILURE);
    }
    if(passwd_file.size() > 0 && S3fsCurl::IsSetAccessKeyId()){
    if(passwd_file.size() > 0 && S3fsCurl::IsSetAccessKeys()){
        S3FS_PRN_EXIT("specifying both passwd_file and the access keys options is invalid.");
        exit(EXIT_FAILURE);
    }
    if(!S3fsCurl::IsPublicBucket() && !load_iamrole){
    if(!S3fsCurl::IsPublicBucket() && !load_iamrole && !is_ecs){
        if(EXIT_SUCCESS != get_access_keys()){
            exit(EXIT_FAILURE);
        }
        if(!S3fsCurl::IsSetAccessKeyId()){
        if(!S3fsCurl::IsSetAccessKeys()){
            S3FS_PRN_EXIT("could not establish security credentials, check documentation.");
            exit(EXIT_FAILURE);
        }
@ -4969,6 +4983,29 @@ int main(int argc, char* argv[])
        exit(EXIT_FAILURE);
    }

    // check IBM IAM requirements
    if(is_ibm_iam_auth){

        // check that default ACL is either public-read or private
        string defaultACL = S3fsCurl::GetDefaultAcl();
        if(defaultACL == "private"){
            // IBM's COS default ACL is private
            // set acl as empty string to avoid sending x-amz-acl header
            S3fsCurl::SetDefaultAcl("");
        }else if(defaultACL != "public-read"){
            S3FS_PRN_EXIT("can only use 'public-read' or 'private' ACL while using ibm_iam_auth");
            return -1;
        }

        if(create_bucket && !S3fsCurl::IsSetAccessKeyID()){
            S3FS_PRN_EXIT("missing service instance ID for bucket creation");
            return -1;
        }
    }

    // set user agent
    S3fsCurl::InitUserAgent();

    // There's room for more command line error checking

    // Check to see if the bucket name contains periods and https (SSL) is

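The is_ibm_iam_auth block above restricts the default ACL to 'public-read' or 'private', and clears 'private' so that no x-amz-acl header is sent at all. A small stand-alone restatement of that rule; the function name is invented for illustration:

```
#include <string>

// Returns false for an ACL that ibm_iam_auth cannot use; otherwise leaves
// acl as "public-read" or empties it, since "private" is already IBM COS's
// default and sending x-amz-acl would then be unnecessary.
static bool normalize_ibm_acl(std::string& acl) {
    if(acl == "private"){
        acl.clear();             // suppress the x-amz-acl header
        return true;
    }
    return acl == "public-read";
}
```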
@ -21,7 +21,8 @@
#define S3FS_S3_H_

#define FUSE_USE_VERSION 26
#define FIVE_GB 5368709120LL

static const int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL;

#include <fuse.h>

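This hunk replaces the FIVE_GB macro with a typed constant; the arithmetic form (5 times 1024 cubed) is the same value as the old literal, which a static_assert can confirm:

```
#include <stdint.h>

static const int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL;
static_assert(FIVE_GB == 5368709120LL, "same value as the old #define");
```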
@ -510,8 +510,17 @@ int is_uid_include_group(uid_t uid, gid_t gid)
        return -ENOMEM;
    }
    // get group information
    if(0 != (result = getgrgid_r(gid, &ginfo, pbuf, maxlen, &pginfo))){
        S3FS_PRN_ERR("could not get group information.");
    while(ERANGE == (result = getgrgid_r(gid, &ginfo, pbuf, maxlen, &pginfo))){
        free(pbuf);
        maxlen *= 2;
        if(NULL == (pbuf = (char*)malloc(sizeof(char) * maxlen))){
            S3FS_PRN_CRIT("failed to allocate memory.");
            return -ENOMEM;
        }
    }

    if(0 != result){
        S3FS_PRN_ERR("could not get group information(%d).", result);
        free(pbuf);
        return -result;
    }
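The new loop retries getgrgid_r() whenever it reports ERANGE, doubling the buffer each time, instead of treating a too-small buffer as a hard failure. A self-contained version of the same idiom, with an arbitrarily chosen starting size:

```
#include <grp.h>
#include <cerrno>
#include <cstdio>
#include <cstdlib>

int print_group_name(gid_t gid)
{
    struct group  ginfo;
    struct group* pginfo = NULL;
    size_t        maxlen = 256;               // small on purpose to exercise the retry
    char*         pbuf   = (char*)malloc(maxlen);
    if(NULL == pbuf){
        return ENOMEM;
    }
    int result;
    while(ERANGE == (result = getgrgid_r(gid, &ginfo, pbuf, maxlen, &pginfo))){
        free(pbuf);                           // buffer was too small: grow and retry
        maxlen *= 2;
        if(NULL == (pbuf = (char*)malloc(maxlen))){
            return ENOMEM;
        }
    }
    if(0 == result && NULL != pginfo){
        printf("gid %u -> %s\n", (unsigned)gid, ginfo.gr_name);  // strings live inside pbuf
    }
    free(pbuf);
    return result;
}
```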
@ -1129,12 +1138,8 @@ void show_help (void)
    "   url (default=\"https://s3.amazonaws.com\")\n"
    "      - sets the url to use to access Amazon S3. If you want to use HTTP,\n"
    "        then you can set \"url=http://s3.amazonaws.com\".\n"
    "        If you start s3fs without specifying the url option, s3fs will\n"
    "        check the bucket using https://s3.amazonaws.com. And when bucket\n"
    "        check fails, s3fs retries the bucket check using\n"
    "        http://s3.amazonaws.com. This is the function left behind for\n"
    "        backward compatibility. If you do not use https, please specify\n"
    "        the URL with the url option.\n"
    "        If you do not use https, please specify the URL with the url\n"
    "        option.\n"
    "\n"
    "   endpoint (default=\"us-east-1\")\n"
    "      - sets the endpoint to use on signature version 4\n"
@ -1162,12 +1167,21 @@ void show_help (void)
    "   enable_content_md5 (default is disable)\n"
    "      - ensure data integrity during writes with MD5 hash.\n"
    "\n"
    "   ecs\n"
    "      - This option instructs s3fs to query the ECS container credential\n"
    "        metadata address instead of the instance metadata address.\n"
    "\n"
    "   iam_role (default is no IAM role)\n"
    "      - This option requires the IAM role name or \"auto\". If you specify\n"
    "        \"auto\", s3fs will automatically use the IAM role names that are set\n"
    "        to an instance. If you specify this option without any argument, it\n"
    "        is treated the same as specifying \"auto\".\n"
    "\n"
    "   ibm_iam_auth\n"
    "      - This option instructs s3fs to use IBM IAM authentication.\n"
    "        In this mode, the AWSAccessKey and AWSSecretKey will be used as\n"
    "        IBM's Service-Instance-ID and APIKey, respectively.\n"
    "\n"
    "   use_xattr (default is not handling the extended attribute)\n"
    "        Enable to handle the extended attribute(xattrs).\n"
    "        If you set this option, you can use the extended attribute.\n"

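For context, the new options slot into a mount command or fstab entry like any other -o option. The bucket name, mount point, and the use of iam_role below are placeholders chosen for illustration, not values taken from this change:

```
# command line
s3fs mybucket /mnt/s3 -o iam_role=auto -o url=https://s3.amazonaws.com

# /etc/fstab (one common spelling of the fuse.s3fs type)
mybucket /mnt/s3 fuse.s3fs _netdev,iam_role=auto 0 0
```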
@ -32,6 +32,21 @@

using namespace std;

template <class T> std::string str(T value) {
    std::stringstream s;
    s << value;
    return s.str();
}

template std::string str(short value);
template std::string str(unsigned short value);
template std::string str(int value);
template std::string str(unsigned int value);
template std::string str(long value);
template std::string str(unsigned long value);
template std::string str(long long value);
template std::string str(unsigned long long value);

static const char hexAlphabet[] = "0123456789ABCDEF";

off_t s3fs_strtoofft(const char* str, bool is_base_16)

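Moving the str() template's definition out of the header is what forces the explicit instantiation list above: other translation units now see only a declaration, so every T they may use must be instantiated in this file. Reduced to its essentials; the file names in the comments mirror this change, the rest is a sketch:

```
#include <sstream>
#include <string>

// string_util.h carries only the declaration:
template <class T> std::string str(T value);

// string_util.cpp carries the definition plus one explicit instantiation per
// supported type; without e.g. the int line below, str(42) in another .cpp
// would compile but fail to link.
template <class T> std::string str(T value) {
    std::stringstream s;
    s << value;
    return s.str();
}
template std::string str(int value);
template std::string str(unsigned long value);
```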
@ -28,16 +28,12 @@
#include <sys/types.h>

#include <string>
#include <sstream>

#define SPACES " \t\r\n"
#define STR2NCMP(str1, str2) strncmp(str1, str2, strlen(str2))
static const std::string SPACES = " \t\r\n";

template<typename T> std::string str(T value) {
    std::stringstream s;
    s << value;
    return s.str();
}
static inline int STR2NCMP(const char *str1, const char *str2) { return strncmp(str1, str2, strlen(str2)); }

template <class T> std::string str(T value);

off_t s3fs_strtoofft(const char* str, bool is_base_16 = false);

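Turning STR2NCMP from a macro into a static inline function keeps the same "does str1 start with str2?" behavior while giving it a real prototype: arguments are type-checked and evaluated exactly once, which the macro could not guarantee:

```
#include <cstring>

static inline int STR2NCMP(const char *str1, const char *str2) {
    return strncmp(str1, str2, strlen(str2));
}

// STR2NCMP("url=http://example", "url=") == 0   -> prefix matches
// STR2NCMP("endpoint=us-east-1", "url=") != 0   -> no match
```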
@ -34,8 +34,8 @@ void assert_strequals(const char *x, const char *y, const char *file, int line)
    if(x == NULL && y == NULL){
        return;
    // cppcheck-suppress nullPointerRedundantCheck
    } else if((x == NULL || y == NULL) || strcmp(x, y) != 0){
        std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
    } else if(x == NULL || y == NULL || strcmp(x, y) != 0){
        std::cerr << (x ? x : "null") << " != " << (y ? y : "null") << " at " << file << ":" << line << std::endl;
        std::exit(1);
    }
}

@ -41,7 +41,7 @@ set -o errexit
S3FS=../src/s3fs

# Allow these defaulted values to be overridden
: ${S3_URL:="http://127.0.0.1:8080"}
: ${S3_URL:="https://127.0.0.1:8080"}
: ${S3FS_CREDENTIALS_FILE:="passwd-s3fs"}
: ${TEST_BUCKET_1:="s3fs-integration-test"}

@ -50,7 +50,7 @@ export S3_URL
export TEST_SCRIPT_DIR=`pwd`
export TEST_BUCKET_MOUNT_POINT_1=${TEST_BUCKET_1}

S3PROXY_VERSION="1.5.2"
S3PROXY_VERSION="1.5.3"
S3PROXY_BINARY=${S3PROXY_BINARY-"s3proxy-${S3PROXY_VERSION}"}

if [ ! -f "$S3FS_CREDENTIALS_FILE" ]
@ -108,7 +108,8 @@ function start_s3proxy {
        chmod +x "${S3PROXY_BINARY}"
    fi

    stdbuf -oL -eL java -jar "$S3PROXY_BINARY" --properties $S3PROXY_CONFIG | stdbuf -oL -eL sed -u "s/^/s3proxy: /" &
    stdbuf -oL -eL java -jar "$S3PROXY_BINARY" --properties $S3PROXY_CONFIG &
    S3PROXY_PID=$!

    # wait for S3Proxy to start
    for i in $(seq 30);
@ -121,8 +122,6 @@ function start_s3proxy {
        fi
        sleep 1
    done

    S3PROXY_PID=$(netstat -lpnt | grep :8080 | awk '{ print $7 }' | sed -u 's|/java||')
    fi
}

@ -130,7 +129,6 @@ function stop_s3proxy {
    if [ -n "${S3PROXY_PID}" ]
    then
        kill $S3PROXY_PID
        wait $S3PROXY_PID
    fi
}

@ -181,15 +179,34 @@ function start_s3fs {
            $TEST_BUCKET_MOUNT_POINT_1 \
            -o use_path_request_style \
            -o url=${S3_URL} \
            -o no_check_certificate \
            -o ssl_verify_hostname=0 \
            -o createbucket \
            ${AUTH_OPT} \
            -o dbglevel=${DBGLEVEL:=info} \
            -o retries=3 \
            -f \
            ${@} \
            |& stdbuf -oL -eL sed -u "s/^/s3fs: /" &
            ${@} | stdbuf -oL -eL sed -u "s/^/s3fs: /" &
    )

    retry 5 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts || exit 1
    if [ `uname` = "Darwin" ]; then
        set +o errexit
        TRYCOUNT=0
        while [ $TRYCOUNT -le 20 ]; do
            df | grep -q $TEST_BUCKET_MOUNT_POINT_1
            if [ $? -eq 0 ]; then
                break;
            fi
            sleep 1
            TRYCOUNT=`expr ${TRYCOUNT} + 1`
        done
        if [ $? -ne 0 ]; then
            exit 1
        fi
        set -o errexit
    else
        retry 5 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts || exit 1
    fi

    # Quick way to start system up for manual testing with options under test
    if [[ -n ${INTERACT} ]]; then
@ -202,14 +219,21 @@ function start_s3fs {

function stop_s3fs {
    # Retry in case file system is in use
    if grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts; then
        retry 10 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts && fusermount -u $TEST_BUCKET_MOUNT_POINT_1
    if [ `uname` = "Darwin" ]; then
        df | grep -q $TEST_BUCKET_MOUNT_POINT_1
        if [ $? -eq 0 ]; then
            retry 10 df | grep -q $TEST_BUCKET_MOUNT_POINT_1 && umount $TEST_BUCKET_MOUNT_POINT_1
        fi
    else
        if grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts; then
            retry 10 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts && fusermount -u $TEST_BUCKET_MOUNT_POINT_1
        fi
    fi
}

# trap handlers do not stack. If a test sets its own, the new handler should call common_exit_handler
function common_exit_handler {
    stop_s3proxy
    stop_s3fs
    stop_s3proxy
}
trap common_exit_handler EXIT

@ -6,7 +6,11 @@ source test-utils.sh

function test_append_file {
    describe "Testing append to file ..."

    # Write a small test file
    if [ `uname` = "Darwin" ]; then
        cat /dev/null > ${TEST_TEXT_FILE}
    fi
    for x in `seq 1 $TEST_TEXT_FILE_LENGTH`
    do
        echo "echo ${TEST_TEXT} to ${TEST_TEXT_FILE}"
@ -51,7 +55,11 @@ function test_truncate_empty_file {
    truncate ${TEST_TEXT_FILE} -s $t_size

    # Verify file is zero length
    size=$(stat -c %s ${TEST_TEXT_FILE})
    if [ `uname` = "Darwin" ]; then
        size=$(stat -f "%z" ${TEST_TEXT_FILE})
    else
        size=$(stat -c %s ${TEST_TEXT_FILE})
    fi
    if [ $t_size -ne $size ]
    then
        echo "error: expected ${TEST_TEXT_FILE} to be $t_size length, got $size"
@ -77,6 +85,9 @@ function test_mv_file {
    # create the test file again
    mk_test_file

    # save file length
    ALT_TEXT_LENGTH=`wc -c $TEST_TEXT_FILE | awk '{print $1}'`

    #rename the test file
    mv $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
    if [ ! -e $ALT_TEST_TEXT_FILE ]
@ -86,7 +97,6 @@ function test_mv_file {
    fi

    # Check the contents of the alt file
    ALT_TEXT_LENGTH=`echo $TEST_TEXT | wc -c | awk '{print $1}'`
    ALT_FILE_LENGTH=`wc -c $ALT_TEST_TEXT_FILE | awk '{print $1}'`
    if [ "$ALT_FILE_LENGTH" -ne "$ALT_TEXT_LENGTH" ]
    then
@ -179,12 +189,21 @@ function test_chmod {
    # create the test file again
    mk_test_file

    ORIGINAL_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
    if [ `uname` = "Darwin" ]; then
        ORIGINAL_PERMISSIONS=$(stat -f "%p" $TEST_TEXT_FILE)
    else
        ORIGINAL_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
    fi

    chmod 777 $TEST_TEXT_FILE;

    # if they're the same, we have a problem.
    if [ $(stat --format=%a $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
    if [ `uname` = "Darwin" ]; then
        CHANGED_PERMISSIONS=$(stat -f "%p" $TEST_TEXT_FILE)
    else
        CHANGED_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
    fi
    if [ $CHANGED_PERMISSIONS == $ORIGINAL_PERMISSIONS ]
    then
        echo "Could not modify $TEST_TEXT_FILE permissions"
        return 1
@ -200,12 +219,21 @@ function test_chown {
    # create the test file again
    mk_test_file

    ORIGINAL_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
    if [ `uname` = "Darwin" ]; then
        ORIGINAL_PERMISSIONS=$(stat -f "%u:%g" $TEST_TEXT_FILE)
    else
        ORIGINAL_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
    fi

    chown 1000:1000 $TEST_TEXT_FILE;

    # if they're the same, we have a problem.
    if [ $(stat --format=%u:%g $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
    if [ `uname` = "Darwin" ]; then
        CHANGED_PERMISSIONS=$(stat -f "%u:%g" $TEST_TEXT_FILE)
    else
        CHANGED_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
    fi
    if [ $CHANGED_PERMISSIONS == $ORIGINAL_PERMISSIONS ]
    then
        if [ $ORIGINAL_PERMISSIONS == "1000:1000" ]
        then
@ -262,6 +290,10 @@ function test_rename_before_close {

function test_multipart_upload {
    describe "Testing multi-part upload ..."

    if [ `uname` = "Darwin" ]; then
        cat /dev/null > $BIG_FILE
    fi
    dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
    dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1

@ -278,6 +310,10 @@ function test_multipart_copy {
    describe "Testing multi-part copy ..."

    if [ `uname` = "Darwin" ]; then
        cat /dev/null > $BIG_FILE
    fi
    dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
    dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
    mv "${BIG_FILE}" "${BIG_FILE}-copy"
@ -364,8 +400,13 @@ function test_mtime_file {

    #copy the test file with preserve mode
    cp -p $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
    testmtime=`stat -c %Y $TEST_TEXT_FILE`
    altmtime=`stat -c %Y $ALT_TEST_TEXT_FILE`
    if [ `uname` = "Darwin" ]; then
        testmtime=`stat -f "%m" $TEST_TEXT_FILE`
        altmtime=`stat -f "%m" $ALT_TEST_TEXT_FILE`
    else
        testmtime=`stat -c %Y $TEST_TEXT_FILE`
        altmtime=`stat -c %Y $ALT_TEST_TEXT_FILE`
    fi
    if [ "$testmtime" -ne "$altmtime" ]
    then
        echo "File times do not match: $testmtime != $altmtime"

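All of the Darwin branches added in these tests translate between GNU coreutils stat and BSD stat; the pairs used above map as follows:

| value | GNU/Linux stat | macOS (BSD) stat |
|---|---|---|
| size in bytes | `stat -c %s` | `stat -f "%z"` |
| permission bits | `stat --format=%a` | `stat -f "%p"` |
| owner uid:gid | `stat --format=%u:%g` | `stat -f "%u:%g"` |
| mtime (epoch seconds) | `stat -c %Y` | `stat -f "%m"` |

Note that BSD `%p` prints the full mode including file-type bits while GNU `%a` prints only the permission octal, so the two spellings produce different strings; the tests only compare a value against itself before and after a change, so either form works.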
BIN
test/keystore.jks
Normal file
Binary file not shown.
@ -1,7 +1,9 @@
s3proxy.endpoint=http://127.0.0.1:8080
s3proxy.secure-endpoint=http://127.0.0.1:8080
s3proxy.authorization=aws-v4
s3proxy.identity=local-identity
s3proxy.credential=local-credential
s3proxy.keystore-path=keystore.jks
s3proxy.keystore-password=password

jclouds.provider=transient
jclouds.identity=remote-identity

@ -24,6 +24,21 @@ function mk_test_file {
        echo "Could not create file ${TEST_TEXT_FILE}, it does not exist"
        exit 1
    fi

    # wait & check
    BASE_TEXT_LENGTH=`echo $TEXT | wc -c | awk '{print $1}'`
    TRY_COUNT=10
    while true; do
        MK_TEXT_LENGTH=`wc -c $TEST_TEXT_FILE | awk '{print $1}'`
        if [ $BASE_TEXT_LENGTH -eq $MK_TEXT_LENGTH ]; then
            break
        fi
        TRY_COUNT=`expr $TRY_COUNT - 1`
        if [ $TRY_COUNT -le 0 ]; then
            echo "Could not create file ${TEST_TEXT_FILE}, the file size is wrong"
        fi
        sleep 1
    done
}

function rm_test_file {
@ -65,9 +80,9 @@ function cd_run_dir {
        echo "TEST_BUCKET_MOUNT_POINT variable not set"
        exit 1
    fi
    RUN_DIR=$(mktemp --directory ${TEST_BUCKET_MOUNT_POINT_1}/testrun-XXXXXX)
    RUN_DIR=$(mktemp -d ${TEST_BUCKET_MOUNT_POINT_1}/testrun-XXXXXX)
    cd ${RUN_DIR}
}

function clean_run_dir {
    if [ -d ${RUN_DIR} ]; then
