687 Commits
v1.81 ... v1.87

SHA1 Message Date
194262c0ef Update ChangeLog and configure.ac for 1.87
Fixes #1335.
2020-08-10 11:52:14 +09:00
e2fbcb4d30 Merge pull request #1348 from gaul/readme/ibm
Move IBM information to Non Amazon S3 wiki page
2020-08-03 00:55:11 +09:00
0c1bc0f803 Add portability wrapper for stat(1)
Fixes #1344.
2020-08-02 23:14:58 +09:00
83361e7905 Add some code backticks to README 2020-08-02 23:02:11 +09:00
19abd9ffaf Move IBM information to Non Amazon S3 wiki page
This gives consistency with other providers:

https://github.com/s3fs-fuse/s3fs-fuse/wiki/Non-Amazon-S3
2020-08-01 20:22:30 +09:00
cbd925c56f Moved the SIGUSR2 handler to S3fsSignals class 2020-07-28 14:54:35 +09:00
63bbb47378 Merge pull request #1341 from gaul/stat-cache/default
Change default stat_cache_expire
2020-07-26 23:54:14 +09:00
0fbd0eac80 Change default stat_cache_expire
Previously s3fs cached file metadata forever, which confused users
who created objects using another client.
2020-07-26 23:04:43 +09:00
e5231fa3c7 Merge pull request #1340 from gaul/test/external-directory
Test creating a directory with external program
2020-07-26 22:06:40 +09:00
ad1961417d Added SIGUSR1 option for cache file integrity test 2020-07-26 21:04:11 +09:00
4154e539ea Test creating a directory with external program
References #1264.
2020-07-25 21:22:53 +09:00
e0a38adaf6 Merge pull request #1333 from ggtakec/similar_processing
Consolidate similar processing into the GetCacheFileStatTopDir method
2020-07-12 20:29:45 +09:00
c3e711da58 Merge pull request #1327 from pxeger/master
`s3fs#bucketname ... fuse` -> `bucketname ... fuse.s3fs` (#1321)
2020-07-12 19:14:27 +09:00
700e288718 Consolidate similar processing into the GetCacheFileStatTopDir method 2020-07-12 08:12:02 +00:00
e72a64785b Merge pull request #1331 from gaul/travis/simplify
Simplify conditional statements
2020-07-12 12:52:21 +09:00
5ace2b70fc Simplify conditional statements 2020-07-10 19:34:46 +09:00
62c8be85d6 Interim fix for Travis macOS Build 2020-07-10 09:47:18 +09:00
3f6b5ae6a5 Merge pull request #1329 from gaul/cppcheck-2.1
Fix NULL pointer dereference
2020-07-09 23:00:36 +09:00
dc365b65a0 Fix NULL pointer dereference
Found via cppcheck 2.1.
2020-07-09 21:40:23 +09:00
9c88ec2128 Merge pull request #1326 from gaul/test/python2
Use Python 2 for write_multiple_offsets
2020-07-09 18:45:41 +09:00
3dd9832f61 s3fs#bucketname ... fuse -> bucketname ... fuse.s3fs (#1321) 2020-07-07 16:06:00 +01:00
4d1f5c899f Use Python 2 for write_multiple_offsets
This aligns with ut_test.py.  Using an older Python also allows
compatibility with the older macOS 10.12 Travis CI.  References #1323.
2020-07-07 21:12:52 +09:00
1f796d432d Fixed upload error when mix-uploading a sparse file and truncating it 2020-06-27 22:44:19 +09:00
35006e318f Fixed a bug in ParallelMixMultipartUpload 2020-06-24 12:48:55 +09:00
7d0c66e08a Add support for glacier storage class.
Just a copy of what has been done in PR #271.
2020-06-23 11:23:21 +09:00
9dc4148743 Merge pull request #1312 from ggtakec/fix_bug_cache
Fixed a bug when serializing from the cache file
2020-06-19 22:54:39 +09:00
f324d8e04f Fixed a bug when serializing from the cache file 2020-06-19 12:57:27 +00:00
f16ee96d7e Merge pull request #1306 from gaul/http/500
Retry with exponential backoff during 500 error
2020-06-06 15:30:22 +09:00
0d849b38c2 Merge pull request #1305 from gaul/alibaba/multipart
Ignore case when comparing ETags
2020-06-06 15:05:39 +09:00
8ed020610f Merge pull request #1296 from gaul/test/oss
Import ossfs tests
2020-06-06 14:40:11 +09:00
d8766b2051 Retry with exponential backoff during 500 error
Amazon suggests retrying on both 500 and 503:

https://aws.amazon.com/premiumsupport/knowledge-center/http-5xx-errors-s3/

Fixes #1251.
2020-06-05 21:01:30 +09:00
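
The backoff described above can be sketched as follows; a minimal C++ illustration assuming a caller-supplied request callable, not s3fs's actual curl layer:

```cpp
#include <chrono>
#include <functional>
#include <thread>

// Retry on 500/503 with exponential backoff, as the commit describes.
// `request` is any callable returning an HTTP status code.
int request_with_backoff(const std::function<int()>& request,
                         int max_tries, int initial_delay_ms) {
    int delay_ms = initial_delay_ms;
    int status = 0;
    for (int attempt = 0; attempt < max_tries; ++attempt) {
        status = request();
        if (status != 500 && status != 503) {
            break;  // success or a non-retryable status
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(delay_ms));
        delay_ms *= 2;  // double the wait between attempts
    }
    return status;
}
```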
9db70bab63 Ignore case when comparing ETags
This allows multipart upload to work with Alibaba OSS.
References #1297.
2020-06-05 18:17:52 +09:00
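
A minimal sketch of such a case-insensitive comparison (the helper name is illustrative, not s3fs's):

```cpp
#include <algorithm>
#include <cctype>
#include <string>

// Case-insensitive ETag comparison: some providers return upper-case
// hex digests while the locally computed digest is lower-case.
bool etag_equals(const std::string& a, const std::string& b) {
    return a.size() == b.size() &&
           std::equal(a.begin(), a.end(), b.begin(),
                      [](unsigned char x, unsigned char y) {
                          return std::tolower(x) == std::tolower(y);
                      });
}
```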
8a7548a9d4 Import ossfs tests
This turns up a failure in test_rename_file when calling seek(0) on a
renamed file.
2020-06-01 09:08:25 +09:00
0cb057dadd Merge pull request #1303 from gaul/rename/use_cache
Relink cache stats file atomically via rename
2020-06-01 00:10:33 +09:00
0f5db0d1bf Merge pull request #1302 from gaul/rename/nocopy
Fix renames of open files with nocopyapi option
2020-05-31 23:46:46 +09:00
94e67c9c58 Merge pull request #1301 from gaul/pthread-result
Check results from pthread mutex calls
2020-05-31 23:11:24 +09:00
274321524c Relink cache stats file atomically via rename
The new file may already exist so link may fail.  Further, link/unlink
is not atomic.  Addresses an error when renaming an open file with
use_cache.  References #1296.
2020-05-31 23:09:58 +09:00
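
The atomic-replace idea, sketched in C++ (paths are placeholders):

```cpp
#include <cstdio>  // std::rename

// Atomically replace the cache stat file: write to a temporary path,
// then rename() over the destination.  Unlike link()/unlink(),
// rename() replaces an existing target in one step on POSIX
// filesystems, so readers never observe a missing or partial file.
bool relink_via_rename(const char* tmp_path, const char* dst_path) {
    return std::rename(tmp_path, dst_path) == 0;
}
```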
40f7007263 Check results from pthread mutex calls
Also remove some unnecessary exception handling.
2020-05-30 16:37:55 +09:00
66597ec5f2 Fix renames of open files with nocopyapi option
References #1296.
2020-05-30 15:45:43 +09:00
75e72385cc Added a parameter to output body to curldbg option 2020-05-25 08:49:01 +09:00
eb58460175 Merge pull request #1294 from gaul/test/profile
Allow overriding test credentials with a profile
2020-05-24 16:14:31 +09:00
0852521a7e Merge pull request #1293 from gaul/s3proxy
Update to S3Proxy 1.7.1
2020-05-24 15:22:00 +09:00
56ed6bb97f Merge pull request #1292 from gaul/retries
Do not allow zero retries
2020-05-24 15:03:03 +09:00
73098220bf Allow overriding test credentials with a profile 2020-05-24 12:05:16 +09:00
ca7756fa77 Update to S3Proxy 1.7.1
Release notes:

https://github.com/gaul/s3proxy/releases/tag/s3proxy-1.7.1
2020-05-23 10:06:44 +09:00
8b15db6dcb Do not allow zero retries
Retries actually means tries, i.e., if the user sets zero, s3fs will
never try an operation at all.
2020-05-23 10:05:23 +09:00
0b60aa81eb Merge pull request #1285 from ggtakec/wrapped_s3fs_strtoofft
Do not abort the process on an exception thrown from s3fs_strtoofft
2020-05-22 22:36:34 +09:00
da70cb92a8 Provide AWS CLI with correct test credentials
This allows tests to pass against real S3 implementations like
Backblaze.  References #272.
2020-05-22 19:27:18 +09:00
746a027e98 Expand on random write limitation 2020-05-05 08:12:04 +09:00
80c11b6c12 Do not abort the process on an exception thrown from s3fs_strtoofft 2020-05-03 13:46:05 +00:00
b76226a06d Merge pull request #1286 from gaul/gcs
Support Google Cloud Storage headers
2020-05-03 22:41:02 +09:00
8945e98d8b Support Google Cloud Storage headers
This allows s3fs to interpret objects created by gsutil.
2020-05-03 18:33:13 +09:00
97c249d5b9 Do not abort the process on an exception thrown from s3fs_strtoofft 2020-05-03 08:08:28 +00:00
6e134a23f9 Merge pull request #1280 from ggtakec/add_test_fdcache
Add test for cache file stat content
2020-05-03 16:33:50 +09:00
a4803543a1 Merge pull request #1282 from gaul/mime
Warn about missing MIME types instead of exiting
2020-05-03 15:15:05 +09:00
2cc88b933f Warn about missing MIME types instead of exiting
s3fs uses the MIME types file to set Content-Type for uploaded
objects.  Most distribution packages should install this via
recommended (not required) dependencies.  Users compiling from source
may not have this installed and s3fs should not prevent launching
since most users do not care about Content-Type.  Instead warn about
MIME types absence.  Fixes #1270.
2020-04-29 20:03:50 +09:00
ce1221c867 Add test for cache file stat content 2020-04-22 15:53:00 +00:00
005a684600 Fix typos 2020-04-22 21:49:11 +09:00
3af17c3019 Add test for out-of-order writes
References #1220.  References #1277.
2020-04-22 21:48:55 +09:00
f26a0aa71d Fixed insufficient upload size for mix multipart upload 2020-04-22 09:31:22 +09:00
2b4619842d Merge pull request #1278 from gaul/http-416
Add handler for HTTP 416
2020-04-21 22:38:54 +09:00
cf529e0af7 Add handler for HTTP 416
This prevents retries when the server indicates an unsatisfiable MPU
copy part request.  References #1220.
2020-04-21 19:45:10 +09:00
4da02d023b Improved strictness of the cache file stat file 2020-04-21 19:45:03 +09:00
fe0677651e Merge pull request #1271 from ggtakec/fix_stat_file
Fixed a truncation bug in the stat file for cache files
2020-04-19 16:27:11 +09:00
811ea0cb85 Fixed a truncation bug in the stat file for cache files 2020-04-19 07:08:49 +00:00
a5f84535f3 Add install instructions for Arch Linux 2020-04-18 19:27:52 +09:00
84bf460f99 Remove deprecated sudo configuration
Addresses warnings of the form:

jobs.include: deprecated key sudo (The key `sudo` has no effect
anymore.)
2020-04-15 10:09:52 +09:00
538fbed302 Merge pull request #1266 from gaul/test/cache-eviction
Test cache eviction
2020-04-14 22:30:04 +09:00
feafb44bae Clean up macOS FUSE loading 2020-04-12 23:18:27 +09:00
a44fc1103d Avoid setting bogus group id
Addresses a symptom on newer macOS in Travis.
2020-04-12 22:37:22 +09:00
48a872e285 Address cppcheck 1.90 warning 2020-04-12 22:20:44 +09:00
c44a60f3f5 Fixed a bug of stats cache compression 2020-04-12 18:33:00 +09:00
f373df9682 Test cache eviction 2020-04-11 19:00:38 +09:00
9e01d5b8d1 Merge pull request #1254 from ggtakec/modify_mimetypes
Added mime option for strict checking of mime types file
2020-04-11 14:48:47 +09:00
7fbda230f5 Added mime option for strict checking of mime types file 2020-03-30 14:41:18 +00:00
56141557dc Avoid unneeded string copy
Found by clang-tidy 10.
2020-03-28 08:49:49 +09:00
fe2b269b6e Merge pull request #1253 from juliogonzalez/fix-1217
Generic compilation instructions: explain that /etc/mime.types is needed
2020-03-19 21:06:32 +09:00
eb6fe69af2 Generic compilation instructions: explain that /etc/mime.types is needed 2020-03-18 23:41:52 +01:00
6489c5d394 Merge pull request #1247 from ggtakec/fix_travis_timeout
Avoid TravisCI timeouts in test execution
2020-03-15 16:48:25 +09:00
854a8a8356 Avoid TravisCI timeouts in test execution 2020-02-24 09:56:18 +00:00
d34475d6a1 Add random writes and appends to README 2020-02-24 18:21:13 +09:00
b72f4b43a4 use correct content-type when completing multipart upload 2020-02-10 16:58:28 +09:00
34e797d6f5 Add Twitter link 2020-02-07 09:01:50 +09:00
bb20fc3c98 Update ChangeLog and configure.ac for 1.86
Fixes #1050.
2020-02-05 22:27:28 +09:00
3e66e42ae5 Merge pull request #1242 from ggtakec/bypass_test
Avoid test errors on macOS
2020-02-05 01:29:11 +09:00
0665d78550 Temporary measure: reduce test time on macOS 2020-02-04 15:38:19 +00:00
55d670f22f Change content-types test for macOS 2020-02-04 14:51:50 +00:00
32ae0d2c79 Bypassed macOS ensure_diskfree test 2020-02-04 14:00:47 +00:00
924eeb3587 Document host and servicepath
The various bits of host, url, and servicepath seem to overlap.
References #1203.
2020-02-04 21:13:29 +09:00
bc9126d774 Set directory MIME type to application/x-directory
Previously s3fs auto-detected the MIME type of directories like
"TOYOTA TRUCK 8.2.2" as application/x-troff-man.  This caused get_mode
to not set S_IFDIR, which caused directory creation to fail.  Instead force all
object names ending in / to application/x-directory.  Fixes #1183.
2020-02-04 20:03:21 +09:00
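
A sketch of the rule this commit describes; guess_mime_type is a hypothetical stand-in for the real /etc/mime.types lookup:

```cpp
#include <string>

// Hypothetical stand-in for a real /etc/mime.types lookup.
static std::string guess_mime_type(const std::string&) {
    return "application/octet-stream";
}

// Any object name ending in '/' is a directory marker: force its type
// instead of guessing from the name (which mis-typed "TOYOTA TRUCK
// 8.2.2" as application/x-troff-man and broke S_IFDIR detection).
std::string content_type_for(const std::string& object_name) {
    if (!object_name.empty() && object_name.back() == '/') {
        return "application/x-directory";
    }
    return guess_mime_type(object_name);
}
```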
4df50e7f85 Test that s3fs automatically detects Content-Type
References #1217.
2020-02-04 19:47:09 +09:00
4e26728cbf break recursion when calling GetIAMCredentials (#1233)
2020-01-31 16:48:37 +09:00
7135666060 Merge pull request #1234 from ggtakec/fix_dbg
Suppressed excessive log output during cache cleanup
2020-01-31 01:00:46 +09:00
018ccb9a11 Suppressed excessive log output during cache cleanup 2020-01-30 14:42:28 +00:00
ee1d3a9057 Merge pull request #1151 from liuyongqing/master
fix deadlock in clean up cache
2020-01-30 22:17:50 +09:00
b762a0a85b fix deadlock due to fdmanager_lock and fdent_data_lock 2020-01-29 12:03:53 +08:00
9771be29b2 Merge pull request #1232 from ggtakec/test_err_avoid
Do not trap chown command errors directly
2020-01-28 20:47:54 +09:00
010a6b83ef Do not trap chown command errors directly 2020-01-28 10:32:55 +00:00
87224b830b Merge pull request #1230 from ggtakec/fix_test
Fixed removing test file in integration-test-main.sh
2020-01-27 00:39:50 +09:00
9e77650e8c Fixed removing test file in integration-test-main.sh 2020-01-26 13:32:41 +00:00
e0712f444d Update source for clang-tidy 9.0.0 2020-01-13 20:56:45 +09:00
913b72fdaf Add intelligent_tiering to man page
Follows on to 39102608aa.
References #1219.
2020-01-13 20:26:01 +09:00
39102608aa Add intelligent tiering as option for storage class
Resolves Issue #1219
2020-01-13 20:24:35 +09:00
23945a0130 In-memory cache for symlinks with eviction 2020-01-13 20:23:10 +09:00
bdfb9ee815 Document requester_pays option 2019-11-25 22:53:50 +09:00
1a75a94253 Merge pull request #1205 from mikelolasagasti/update-doc
Update install instructions for Debian and Fedora
2019-11-24 19:41:05 +09:00
a9d527d517 Merge pull request #1199 from ahuarte47/master_requester-pays
Adds requester_pays option to enable requests involving Requester Pays buckets
2019-11-24 17:44:34 +09:00
94666f7754 Merge pull request #1197 from bbxiao1/compilation-dependencies
Specify pkg-config as a necessary dependency for compilation
2019-11-24 13:28:25 +09:00
41acbaa746 Merge pull request #1200 from ggtakec/travis_cache_osx
Use travis cache for brew on osx
2019-11-24 12:47:31 +09:00
d5042a73bd Update install instructions for Debian and Fedora 2019-11-22 20:46:21 +01:00
f6756596b3 Use travis cache for brew on osx 2019-11-18 12:10:06 +00:00
a24f78f5a4 Adds requester_pays option to enable requests involving Requester Pays buckets 2019-11-18 12:38:16 +01:00
64d4b2c0b0 Specify pkg-config as a necessary dependency for compilation 2019-11-17 17:37:07 -07:00
cc4a307415 Add Gentoo install instructions 2019-11-05 03:44:07 +09:00
a07a5336f6 Merge pull request #1185 from bryceml/master
handle edge case when bucket name and first part of endpoint match
2019-10-31 23:42:47 +09:00
9789ca1a4d Merge pull request #1188 from gaul/s3proxy
Upgrade to S3Proxy 1.7.0
2019-10-31 23:00:35 +09:00
4ec2d685e7 Merge pull request #1181 from ggtakec/fix_1171
Fixed a rename bug
2019-10-31 22:36:34 +09:00
dc62953040 Upgrade to S3Proxy 1.7.0
Release notes:

https://github.com/gaul/s3proxy/releases/tag/s3proxy-1.7.0
2019-10-28 20:11:01 -07:00
0c42a74a8a handle edge case when bucket name and first part of endpoint match 2019-10-26 00:51:19 -06:00
cf3e82d10a Fixed a rename bug when enable_noobj_cache 2019-10-23 12:39:42 +00:00
0e815c2fbc Merge pull request #1173 from ggtakec/fixed_1171
Fixed test_write_multiple_offsets error on osx
2019-10-15 03:30:11 +09:00
27a5536749 Fixed test_write_multiple_offsets error on osx 2019-10-14 17:29:39 +00:00
db338b36b8 Merge pull request #1160 from gaul/mix_upload
Add test for writing to multiple offsets
2019-10-14 22:07:50 +09:00
72b906255f Merge pull request #1169 from ggtakec/test_brew
Updated .travis.yml for tapping homebrew/cask-cask
2019-10-14 21:38:10 +09:00
2211678d91 Updated .travis.yml for tapping homebrew/cask-cask 2019-10-14 11:44:06 +00:00
80162c126b Add test for writing to multiple offsets
References #1098.
2019-09-25 19:31:08 -07:00
1db94a0b30 Fixed multipart upload to use the copy API 2019-09-25 19:30:58 -07:00
b6349e9428 Merge pull request #1158 from ggtakec/fix_cppcheck
Fixed build error found by cppcheck 1.89
2019-09-23 20:22:33 +09:00
bedd648d47 Fixed build error found by cppcheck 1.89 2019-09-23 10:49:49 +00:00
58b3cce320 Merge pull request #1066 from gaul/sign-requests
Sign request immediately before sending
2019-09-23 15:58:21 +09:00
81102a5963 Merge pull request #1149 from ggtakec/fix_retry
Fixed a bug where type and op were not inherited
2019-09-08 20:32:43 +09:00
42fb30852b Merge pull request #1145 from gaul/bool
Fix a few bool types
2019-09-08 20:04:30 +09:00
e51361cb94 Merge pull request #1143 from gaul/doc/umask
Document umask flag
2019-09-08 19:49:58 +09:00
80a9ed9d6a Merge pull request #1142 from gaul/base
Expose base parameter in s3fs_strtoofft
2019-09-08 19:31:31 +09:00
e2129001eb Fixed a bug where type and op were not inherited 2019-09-08 09:59:04 +00:00
805cc064af Fix a few bool types
Found via clang-tidy.
2019-09-05 10:42:42 -07:00
3c2279db39 Document umask flag 2019-09-04 20:53:58 -07:00
412876ca33 Expose base parameter in s3fs_strtoofft
This fixes a regression from ccf3e7bfa2
which caused the misparsing of octal inputs for the mp_umask and umask
flags.  It also allows some callers to be more precise about their
decimal inputs.
2019-09-04 20:41:47 -07:00
461a346bf4 Sign request immediately before sending
Previously s3fs could create a long list of pre-signed requests which
could take longer than the default S3 clock skew limit of 15 minutes.
This also changes SHA-256 computation from single- to multi-threaded
since this is now computed in the worker threads.  Regression from
88cd8feb05.  Fixes #1019.
2019-08-29 15:35:27 -07:00
ae4bcd405c use correct content-type when overwriting and copying files 2019-08-28 22:25:09 -07:00
0536dc1112 Refixed strict processing of MultiRead method 2019-08-25 20:46:01 -07:00
1c3507ede1 Merge pull request #1135 from gaul/curl-timeout
Remove useless CURLE_OPERATION_TIMEDOUT check
2019-08-25 17:53:02 +09:00
3f47037cc7 Merge pull request #1134 from gaul/test/size
Verify file size via metadata as well as data
2019-08-25 17:34:41 +09:00
d87321ef3c Merge pull request #1133 from gaul/default-acl
Promote default ACL to enum
2019-08-25 17:05:54 +09:00
01ac815346 Remove useless CURLE_OPERATION_TIMEDOUT check
CheckBucket only returns errno and HTTP status codes.
2019-08-20 18:44:48 -07:00
2daa1d53d9 Verify file size via metadata as well as data 2019-08-20 18:36:44 -07:00
cc2eed84a5 Promote default ACL to enum
This sanity checks ACLs during initialization and also omits sending
the header when set to PRIVATE which is the default.
2019-08-20 15:05:58 -07:00
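
The enum approach, sketched with illustrative names (not s3fs's actual types):

```cpp
#include <string>

// Represent the canned ACL as an enum so invalid values are caught at
// startup, and omit the x-amz-acl header for the PRIVATE default.
enum class DefaultAcl { PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE, AUTHENTICATED_READ };

std::string acl_header(DefaultAcl acl) {
    switch (acl) {
        case DefaultAcl::PRIVATE:            return "";  // default: send nothing
        case DefaultAcl::PUBLIC_READ:        return "x-amz-acl:public-read";
        case DefaultAcl::PUBLIC_READ_WRITE:  return "x-amz-acl:public-read-write";
        case DefaultAcl::AUTHENTICATED_READ: return "x-amz-acl:authenticated-read";
    }
    return "";
}
```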
c644e4bef2 Merge pull request #1131 from ggtakec/fix_typo
Fixed typo in #1129
2019-08-14 22:26:04 +09:00
a7d83df3eb Fixed typo in #1129 2019-08-14 12:39:38 +00:00
e1886b5343 Merge pull request #1130 from ggtakec/update_multiread
Strict processing of MultiRead method
2019-08-14 00:38:59 +09:00
873e376098 Strict processing of MultiRead method 2019-08-13 15:23:13 +00:00
3c378a4a7a Merge pull request #1129 from ggtakec/update_test
Modified the test script a little
2019-08-13 23:55:17 +09:00
ca7266fb76 Modified the test script a little 2019-08-13 14:21:42 +00:00
4a0c23258e Fixed MultiRead method to exit after waiting for other parts on error 2019-08-12 14:45:06 -07:00
ccc79ec139 Merge pull request #1126 from ggtakec/fix_multipart_copy_retry
Fixed multipart copy and its retry handler
2019-08-12 01:50:44 +09:00
5c4a0a862a Fixed multipart copy and its retry handler 2019-08-11 16:21:26 +00:00
2a779df4fd Merge pull request #1125 from ggtakec/fix_nocopyapi_copy
Fixed simple copy with nocopyapi and modify flag in fdcache
2019-08-11 17:30:04 +09:00
f5bf41cf11 Fixed simple copy with nocopyapi and modify flag in fdcache 2019-08-11 07:42:48 +00:00
f74c7407db Merge pull request #1118 from gaul/atoi
Prefer s3fs_strtoofft over atoi and strtol
2019-08-06 21:23:37 +09:00
433c04af26 Merge pull request #1116 from gaul/bodydata
Do not heap allocate BodyData
2019-08-06 21:08:02 +09:00
2e51a339a9 Merge pull request #1115 from gaul/hash-length
Use length from s3fs_HMAC256 for consistency
2019-08-06 20:49:22 +09:00
0411872dda Merge pull request #1121 from ggtakec/update_test
Added stats cache expire option for test
2019-08-06 20:32:48 +09:00
d8f1aef7be Added stats cache expire option for test 2019-08-06 10:55:38 +00:00
14d3e12103 Merge pull request #1114 from gaul/flush/return-value
Consume return value from FdEntity::Flush
2019-08-06 14:12:25 +09:00
fd13eb314b Merge pull request #1120 from ggtakec/osx_auto_cache
Added auto_cache option for test on osx
2019-08-06 13:31:23 +09:00
daba563a1c Added auto_cache option for test on osx 2019-08-06 02:44:18 +00:00
b79b0b1a92 Merge pull request #1119 from ggtakec/osx_direct_io
Added direct_io option for test on osx
2019-08-06 11:14:51 +09:00
01d4323b50 Merge pull request #1117 from gaul/retry
Increase test startup retries on Linux
2019-08-06 10:54:55 +09:00
dc85eda188 Added direct_io option for test on osx 2019-08-05 17:12:49 +00:00
ccf3e7bfa2 Prefer s3fs_strtoofft over atoi and strtol
The former propagates errors consistently.
2019-08-03 16:13:48 -07:00
d22acae9a3 Increase test startup retries on Linux
This matches the number of retries on macOS and allows Valgrind to
work.
2019-08-03 16:10:42 -07:00
7ecfba811e Do not heap allocate BodyData
Callers already manage lifetimes via Clear().
2019-08-03 15:50:46 -07:00
1e7330e499 Merge pull request #1113 from gaul/unused
Remove unused field
2019-08-04 01:31:56 +09:00
68475e5fcf Merge pull request #1112 from gaul/travis/macos
Upgrade to Xcode 9.2
2019-08-04 01:15:18 +09:00
8cc008c501 Merge pull request #1111 from gaul/test/read-external-object
Add test to read an externally created object
2019-08-03 22:36:34 +09:00
faaaf2ee3c Merge pull request #1110 from gaul/flags/noobj
Test enable_noobj_cache
2019-08-03 22:24:56 +09:00
3d42d0515d Merge pull request #1109 from gaul/lock-already-held
Pass lock_already_held state to DelStat
2019-08-03 22:13:22 +09:00
3d70e8966f Merge pull request #1108 from gaul/lock/diskspace
Always hold lock when accessing free_disk_space
2019-08-03 22:01:31 +09:00
6837cbfe06 Merge pull request #1107 from gaul/concurrency
Access FdEntity metadata while holding fdent_lock
2019-08-03 21:49:54 +09:00
a5c20175a1 Use length from s3fs_HMAC256 for consistency
EVP_MAX_MD_SIZE can range up to 64.  Found by Coverity.
2019-08-02 10:40:55 -07:00
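
A sketch of using the reported digest length with OpenSSL's HMAC(); the helper name is illustrative:

```cpp
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <string>

// Use the length reported by HMAC() rather than assuming a fixed size:
// the output buffer must be EVP_MAX_MD_SIZE (up to 64 bytes), but
// SHA-256 fills only 32 of them, as md_len reports.
std::string hmac_sha256(const std::string& key, const std::string& data) {
    unsigned char md[EVP_MAX_MD_SIZE];
    unsigned int md_len = 0;
    HMAC(EVP_sha256(), key.data(), static_cast<int>(key.size()),
         reinterpret_cast<const unsigned char*>(data.data()), data.size(),
         md, &md_len);
    return std::string(reinterpret_cast<char*>(md), md_len);
}
```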
43d1439420 Consume return value from FdEntity::Flush
Found via Coverity.
2019-08-02 10:01:06 -07:00
d8cf26bd50 Remove unused field
Found via clang.
2019-08-01 16:42:38 -07:00
c321c8c23f Add test to read an externally created object
References #890.
2019-08-01 16:12:51 -07:00
6227fce091 Upgrade to Xcode 9.2
This is the latest version supported by macOS 10.12.
2019-08-01 15:50:50 -07:00
aba8e6ccfa Test enable_noobj_cache 2019-08-01 12:39:11 -07:00
f528a86219 Pass lock_already_held state to DelStat 2019-08-01 11:07:56 -07:00
5b15c7c4e9 Always hold lock when accessing free_disk_space
Slightly reorder locks to avoid double locking.  Found via
ThreadSanitizer.
2019-07-30 08:36:54 -07:00
afd438d363 Access FdEntity metadata while holding fdent_lock
Create copies to avoid thread races.  Found via ThreadSanitizer.
Follows on to ecf13a8cb9.
2019-07-29 13:54:57 -07:00
80972aa33d Merge pull request #1106 from ggtakec/fix_filter
Fixed filter-suite-log.sh for on osx
2019-07-22 18:13:07 +09:00
520995a7e8 Merge pull request #1104 from gaul/sysconf
Eagerly initialize sysconf variables
2019-07-22 18:04:39 +09:00
5c3c6bff2f Fixed filter-suite-log.sh for on osx 2019-07-21 15:32:10 +00:00
fb937635f5 Eagerly initialize sysconf variables
Previously s3fs had races updating these shared variables.  Found via
ThreadSanitizer.
2019-07-17 09:08:13 -07:00
3ad1c95e86 Merge pull request #1103 from gaul/config/readwrite-timeout
Use consistent default for readwrite_timeout
2019-07-17 22:37:02 +09:00
2c4c78fd65 Merge pull request #1102 from gaul/test/concurrent-write
Add a test for concurrent writes
2019-07-17 22:24:45 +09:00
0afef077ed Merge pull request #1101 from gaul/gmtime
Avoid race when using thread-unsafe gmtime
2019-07-17 22:17:18 +09:00
80f598f439 Merge pull request #1099 from ggtakec/filter_log
Add log output filter script at test failure
2019-07-17 22:01:28 +09:00
bacd15714a Use consistent default for readwrite_timeout
Previously curl threads mutated this shared state without a lock.
Found via ThreadSanitizer.
2019-07-16 22:03:56 -07:00
5cb7a31c09 Add a test for concurrent writes 2019-07-16 21:57:43 -07:00
99aace4fc9 Avoid race when using thread-unsafe gmtime
Found via ThreadSanitizer.
2019-07-16 07:28:34 -07:00
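
The thread-safe variant, in brief (a generic illustration, not the s3fs call site):

```cpp
#include <time.h>

// gmtime() returns a pointer into shared static storage, so concurrent
// callers race; gmtime_r() fills a caller-supplied struct tm instead.
bool utc_breakdown(time_t t, struct tm* out) {
    return gmtime_r(&t, out) != nullptr;
}
```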
c7f8f61d09 Change test_concurrency test 2019-07-15 15:58:54 -07:00
159cd2c682 Add log output filter script at test failure 2019-07-15 06:37:43 +00:00
513f41fddf Merge pull request #1097 from ggtakec/fix_lock
Add Lock() func parameter in RowFlush()
2019-07-15 11:56:21 +09:00
543aed2a32 Merge pull request #1096 from gaul/pthread/lock-checking
Expand error checking to all pthread_mutex_t
2019-07-15 11:43:56 +09:00
20ea96328c Merge pull request #1095 from gaul/intmax_t
Avoid narrowing time_t and off_t args in logging
2019-07-15 11:33:44 +09:00
007edb1773 Merge pull request #1090 from gaul/strtoofft
Implement s3fs_strtoofft via strtoll
2019-07-15 11:26:03 +09:00
f78bcc5229 Add Lock() func parameter in RowFlush() 2019-07-15 01:52:29 +00:00
43ec064fb9 Expand error checking to all pthread_mutex_t 2019-07-14 18:21:23 -07:00
ffac4c8417 Avoid narrowing time_t and off_t args in logging
This displays correct results on 32-bit platforms.
2019-07-14 17:02:36 -07:00
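
The portable pattern, assuming a plain printf-style logger:

```cpp
#include <cinttypes>
#include <cstdio>
#include <ctime>
#include <sys/types.h>

// Cast time_t and off_t to intmax_t and print with %jd; passing them
// directly through varargs misreads values on 32-bit platforms where
// the format specifier and the argument width disagree.
void log_entry(time_t mtime, off_t size) {
    std::printf("mtime=%jd size=%jd\n",
                static_cast<intmax_t>(mtime), static_cast<intmax_t>(size));
}
```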
4adcd4a6c8 Implement s3fs_strtoofft via strtoll
This tightens error checking and aligns s3fs with known good behavior.
2019-07-14 15:15:06 -07:00
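
A minimal sketch of strtoll-based parsing with the error checks this commit describes (signature is illustrative; the real s3fs_strtoofft differs):

```cpp
#include <cerrno>
#include <cstdlib>
#include <sys/types.h>

// Parse an off_t via strtoll, rejecting empty input, trailing garbage,
// and out-of-range values.
bool strtoofft_checked(const char* str, off_t* value, int base) {
    char* end = nullptr;
    errno = 0;
    long long result = strtoll(str, &end, base);
    if (end == str || *end != '\0' || errno == ERANGE) {
        return false;  // no digits, trailing junk, or overflow
    }
    *value = static_cast<off_t>(result);
    return true;
}
```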
e936854493 Merge pull request #1094 from larsch/master
Fix multipart upload not setting curl options
2019-07-15 05:13:37 +09:00
850a813171 Merge pull request #1092 from liuyongqing/master
fix coredump caused by fd_manager_lock locking
2019-07-15 04:53:10 +09:00
5bbcd3b981 Merge pull request #1089 from gaul/null-dereference
Avoid null dereference
2019-07-15 04:17:10 +09:00
a337c32840 Merge pull request #1088 from gaul/clang-tidy/misplaced-const
Avoid misplaced const via removing unneeded typedef
2019-07-15 04:08:51 +09:00
d39e4e4b1f Merge pull request #1087 from gaul/clang-tidy/deprecated-headers
Prefer modern C headers
2019-07-15 03:50:15 +09:00
b51d60ef5e Merge pull request #1082 from gaul/locking/fixes
Add a missing lock to SetCtime
2019-07-15 03:38:53 +09:00
58037da061 Fix multipart upload not setting curl options 2019-07-14 14:09:37 +02:00
1eb266588e Add a missing lock to SetCtime
Also lock before log statements which touch member variables.
2019-07-13 17:53:38 -07:00
deb560067e Merge pull request #1085 from gaul/no-recusive-locks
Remove recursive locking
2019-07-14 01:14:32 +09:00
4e351c59e3 fix coredump caused by fd_manager_lock locking 2019-07-13 12:03:27 +08:00
eb597289cb Avoid null dereference
Found via clang-tidy.
2019-07-12 18:40:24 -07:00
6fd42d9fe4 Prefer modern C headers
Found and fixed via clang-tidy.
2019-07-12 03:50:59 -07:00
efff9c01a6 Avoid misplaced const via removing unneeded typedef
Found via clang-tidy.
2019-07-12 03:41:19 -07:00
a83d5baa90 Remove recursive locking
Recursive locking is frowned upon and is incompatible with
PTHREAD_MUTEX_ERRORCHECK.  Also clean up pthread_mutex_lock error
checking.
2019-07-10 12:39:00 -07:00
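
Initializing an error-checking mutex looks like this (generic sketch, not the s3fs wrapper):

```cpp
#include <pthread.h>
#include <cstdlib>

// PTHREAD_MUTEX_ERRORCHECK makes relocking from the same thread return
// EDEADLK instead of deadlocking silently, which is why recursive
// locking had to go.
void init_errorcheck_mutex(pthread_mutex_t* mtx) {
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
    if (pthread_mutex_init(mtx, &attr) != 0) {
        abort();  // treat initialization failure as fatal
    }
    pthread_mutexattr_destroy(&attr);
}
```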
50d13255e4 Merge pull request #1081 from gaul/write-blocks-readdir
Break FdEntity locks into data and metadata
2019-07-11 03:40:26 +09:00
5195fa93fa Merge pull request #1071 from gaul/macos/xattr
Run xattr tests on macOS
2019-07-07 17:37:00 +09:00
e5e63d6ac3 Merge pull request #1067 from gaul/mismatched-free
Fix mismatched new[]/free
2019-07-07 16:55:07 +09:00
7a65a414c3 Merge pull request #1080 from gaul/xml/simplification
Add simple XML parsing wrapper
2019-07-07 16:44:18 +09:00
4a192ffdf9 Run xattr tests on macOS 2019-07-07 00:39:45 -07:00
944d21cabb Fix mismatched new[]/free
Found via Valgrind.
2019-07-07 00:36:48 -07:00
d267212289 Merge pull request #1079 from gaul/cppcheck/shadowing
Avoid shadowing variable in FdEntity::Open
2019-07-07 16:29:42 +09:00
58d8e5586a Merge pull request #1078 from gaul/multipart/2x
Issue multipart when object size exceeds part size
2019-07-07 16:21:22 +09:00
ce803daf4a Merge pull request #1077 from gaul/openssl/locking
Annotate OpenSSL locking functions as unused
2019-07-07 16:10:14 +09:00
9bf34e2fda Merge pull request #1076 from gaul/doc/man-help
Add documentation for use_session_token and use_rrs
2019-07-07 15:45:42 +09:00
52218d2ddb Merge pull request #1075 from gaul/fortify-source
Compile with FORTIFY_SOURCE
2019-07-07 15:38:08 +09:00
6bd1a7eac0 Merge pull request #1074 from gaul/clang-tidy
Configure clang-tidy target
2019-07-07 15:28:08 +09:00
6177d7b096 Merge pull request #1073 from gaul/deps/s3proxy
Upgrade to S3Proxy 1.6.2
2019-07-07 15:19:00 +09:00
3161bf4608 Merge pull request #1070 from gaul/bash/nounset
Prohibit pipeline failures
2019-07-07 15:10:53 +09:00
2349dafb98 Merge pull request #1069 from gaul/macos/sed
Use system sed on macOS
2019-07-07 15:00:48 +09:00
1cd58d7828 Merge pull request #1035 from gaul/test/flags
Individually test multiple s3fs flags
2019-07-07 14:41:15 +09:00
8aa06d621a Add documentation for use_session_token and use_rrs
Fixes #929.
2019-07-06 10:04:43 -07:00
ecf13a8cb9 Break FdEntity locks into data and metadata
Previously long-running data operations like RowFlush would block
metadata operations like GetStats and thus user readdir.  Fixes #928.
2019-07-05 23:12:24 -07:00
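
The lock split, sketched with an illustrative class (not the real FdEntity):

```cpp
#include <pthread.h>
#include <sys/types.h>

// Guard file data and metadata with separate mutexes so a long flush
// holding the data lock no longer blocks stat callers, which only
// need metadata.
class Entity {
public:
    Entity() {
        pthread_mutex_init(&data_lock, nullptr);
        pthread_mutex_init(&meta_lock, nullptr);
    }
    off_t GetSize() {
        pthread_mutex_lock(&meta_lock);  // metadata only: cheap, never
        off_t s = size;                  // waits on in-flight uploads
        pthread_mutex_unlock(&meta_lock);
        return s;
    }
    void Flush() {
        pthread_mutex_lock(&data_lock);  // long-running data operation
        // ... upload dirty pages ...
        pthread_mutex_unlock(&data_lock);
    }
private:
    pthread_mutex_t data_lock;
    pthread_mutex_t meta_lock;
    off_t size = 0;
};
```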
b8ff6a647e Add simple XML parsing wrapper
Also simplify check_region_error.
2019-07-05 17:39:09 -07:00
49110c671d Avoid shadowing variable in FdEntity::Open
Found via cppcheck 1.88.
2019-07-05 15:06:18 -07:00
febaf6849f Issue multipart when object size exceeds part size
Previously s3fs issued multipart uploads when the object size was
twice the part size.  Conjoining this with the part size was confusing
and s3fs should add a separate tunable for this if needed, similar to
singlepart_copy_limit.  Fixes #1058.
2019-07-05 12:14:56 -07:00
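
The new threshold reduces to a one-line check (illustrative):

```cpp
#include <sys/types.h>

// Issue a multipart upload as soon as the object exceeds one part;
// previously the threshold was twice the part size.
bool use_multipart(off_t object_size, off_t part_size) {
    return object_size > part_size;
}
```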
4893174652 Annotate OpenSSL locking functions as unused
OpenSSL 1.1.0 removed these and generates warnings with newer
versions.
2019-07-05 10:45:57 -07:00
5820c72092 Compile with FORTIFY_SOURCE
This can catch some classes of buffer overflows.
2019-07-04 10:20:26 -07:00
4f23f38583 Individually test multiple s3fs flags
Remove unneeded comments; single part limits ensure that the tests
exercise multipart code paths even with smaller files.
References #971.
2019-07-03 21:09:40 -07:00
bbfa91141a Configure clang-tidy target
Also fix nits.
2019-07-03 14:04:11 -07:00
f439c6382f Upgrade to S3Proxy 1.6.2
Notably this includes support for List Objects v2 which AWS CLI uses.
Release notes:

https://github.com/gaul/s3proxy/releases/tag/s3proxy-1.6.2
2019-07-03 10:37:26 -07:00
21321a9d96 Merge pull request #1065 from gaul/doc/ssl-verify-hostname
Document ssl_verify_hostname
2019-07-03 22:15:33 +09:00
f03b50fd13 Merge pull request #1063 from gaul/truncate-2nd-open-fd
Flush file when opening second fd
2019-07-03 21:14:03 +09:00
15a870f9d9 Merge pull request #1061 from gaul/fdpagelist/value
Make fdpage a value type in fdpage_list_t
2019-07-03 21:04:16 +09:00
9472ee4a01 Prohibit pipeline failures 2019-07-03 02:57:40 -07:00
1f1f824da7 Use system sed on macOS
This requires fewer developer customizations to work on macOS.
Requires some GNU workarounds.
2019-07-02 22:17:02 -07:00
f02105c346 Document ssl_verify_hostname
References #1064.
2019-07-02 10:26:29 -07:00
c596441f58 Flush file when opening second fd
Previously when s3fs had dirty local data and an application opened a
second fd it would remove the stat cache entry, query S3 for the size
which was still zero, then reinitialize FdEntry with this incorrect
size.  Instead flush local data to S3 so this query works.  It is
possible that a more involved patch could do this with a less
heavyweight approach but this requires changing open.  This does not
completely fix git clone but allows it to proceed further.  Also make
some cache methods const-correct.  References #839.
2019-07-02 01:12:09 -07:00
455e29cbea Make fdpage a value type in fdpage_list_t
This simplifies memory management.
2019-07-01 09:30:48 -07:00
511d223468 Merge pull request #1059 from gaul/casting
Fix casting warning on 32-bit
2019-07-01 22:26:14 +09:00
5324c1b588 Merge pull request #1055 from gaul/test/litter
Remove litter from test_concurrency
2019-07-01 22:10:57 +09:00
554ea49294 Merge pull request #1051 from gaul/log/flags
Log flags as hexadecimal
2019-07-01 21:36:17 +09:00
d7f77a6282 Fix casting warning on 32-bit 2019-06-29 20:57:42 -07:00
048aea1151 Remove litter from test_concurrency 2019-06-26 20:29:27 -07:00
f1ad626b46 Log flags as hexadecimal 2019-06-24 16:13:07 -07:00
a78d8d1da4 Merge pull request #1049 from gaul/external-modication
Remove cache file when object time differs
2019-06-24 00:51:54 +09:00
fbebc6fa57 Merge pull request #1048 from gaul/log/curl
Emit older curl warnings only once
2019-06-24 00:42:44 +09:00
c18fc901c4 Merge pull request #1045 from gaul/test/umount-s3fs
Correct macOS integration test umount
2019-06-23 23:02:46 +09:00
245f14c8c1 Merge pull request #1041 from gaul/doc/typo
Fix singlepart_copy_limit default
2019-06-23 22:57:43 +09:00
d732eef776 Merge pull request #1039 from gaul/off_t
Convert file offsets from size_t to off_t
2019-06-23 22:51:04 +09:00
56b184fd0c Merge pull request #1032 from gaul/man/env
Add environment variables to man page
2019-06-23 22:42:39 +09:00
9e5eaad79b Remove cache file when object time differs
Check the modification times to determine whether an object has been
updated.  This relies on low clock skew between s3fs and the S3
server; a more robust approach could use the ETag.  Fixes #1047.
2019-06-22 19:09:00 -07:00
738eaadcbf Emit older curl warnings only once
This makes Travis logs actually readable.
2019-06-22 10:01:20 -07:00
1cf3d2452e Correct macOS integration test umount
Previously errexit could prevent this from succeeding.
2019-06-19 11:01:32 -07:00
670dce6f4a Merge pull request #1036 from gaul/travis/macos
Do not update Homebrew in macOS builder
2019-06-18 22:24:36 +09:00
07cfdcf205 Fix singlepart_copy_limit default
Follows on to c5ebf5d328.
2019-06-15 17:38:14 -07:00
15b7450713 Convert file offsets from size_t to off_t
The latter is 64-bits on 32-bit platforms when specifying
-D_FILE_OFFSET_BITS=64.  This allows early Raspberry Pis to use files
larger than 2 GB.  It also cleans up some ugly casting.  Fixes #620.
Fixes #656.
2019-06-15 17:05:37 -07:00
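
A build-time guard for this assumption might look like the following (illustrative, not in the s3fs tree):

```cpp
#include <sys/types.h>

// Build with -D_FILE_OFFSET_BITS=64 so off_t is 64 bits even on 32-bit
// platforms; a 32-bit offset cannot address files larger than 2 GB.
static_assert(sizeof(off_t) == 8, "compile with -D_FILE_OFFSET_BITS=64");
```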
272e0d3d46 Do not update Homebrew in macOS builder
This takes 5 minutes to run and can cause Travis timeouts.
References #1035.
2019-06-12 18:17:56 -07:00
8d8a2a66e5 Add environment variables to man page
Also fix typo in AWS credentials.
2019-05-15 18:49:18 +07:00
befc2e9e6f Merge pull request #1025 from gaul/nocopyapi
Update ctime during nocopyapi operations
2019-05-02 17:55:42 +09:00
059ab1f0f4 Update ctime during nocopyapi operations
Follows on to 2c43b1e12b.  Fixes #971.
2019-04-30 18:59:51 +09:00
f2fe1738cd Merge pull request #1021 from michallula/master
fix issue with aws session token provided inside ~/.aws/credentials file
2019-04-28 11:03:48 +09:00
0d4847596e fix issue with aws session token provided inside ~/.aws/credentials file 2019-04-25 14:29:35 +02:00
8e86ef1634 Merge pull request #1015 from gaul/c++03
Remove uses of std::map::at
2019-04-22 23:01:29 +09:00
a32a05962e Merge pull request #1014 from gaul/macro
Prefer simple over compound statements in macros
2019-04-22 22:49:23 +09:00
a7e81fda9b Merge pull request #1012 from gaul/doc/markdown
Address markdownlint warnings
2019-04-22 22:39:55 +09:00
9e4f9d4bdc Remove uses of std::map::at
This provides compatibility with C++03 and removes duplicate lookups.
2019-04-18 16:06:59 +09:00
0677a096a9 Prefer simple over compound statements in macros
This prohibits missing semicolons.
2019-04-17 21:46:24 +09:00
381835e2a9 Merge pull request #1008 from michallula/master
AWS session token support
2019-04-17 17:52:08 +09:00
af070fa3de Address markdownlint warnings 2019-04-17 08:09:31 +09:00
f9cd43b684 add session token support 2019-04-16 16:53:05 +02:00
7095787f1f Merge pull request #1011 from gaul/doc/man
Make man page and --help more consistent
2019-04-16 23:07:17 +09:00
4ca336aed0 Merge pull request #1003 from gaul/doc/installation
Standardize installation section
2019-04-16 22:30:51 +09:00
8a18806a57 Make man page and --help more consistent
Mostly small fixes but also some reordering.  References #929.  Found
via:

diff -u <(man --no-hyphenation doc/man/s3fs.1 | tr -s ' ' '\n' | sed '/^-o$/d' ) <(src/s3fs --help | tr -s ' ' '\n' | sed '/^-$/d')
2019-04-14 14:34:24 +09:00
e5e124b9aa Merge pull request #1006 from gaul/shellcheck
Address shellcheck errors
2019-04-09 23:39:11 +09:00
090ac7a8a0 Merge pull request #1005 from gaul/travis/simplify
Remove custom cppcheck from ppc64le Travis config
2019-04-09 23:29:06 +09:00
97af471aa6 Remove custom cppcheck from ppc64le Travis config 2019-04-09 21:25:58 +09:00
0176fc712b Address shellcheck errors 2019-04-09 21:11:31 +09:00
c426c896d0 Standardize installation section
Sort Linux distributions and simplify/standardize grammar.
2019-04-09 07:52:54 +09:00
0a99470369 Merge pull request #1002 from gaul/sse-c
Correctly calculate MD5 hash for SSE-C keys
2019-04-09 00:17:12 +09:00
cd280d8702 Merge pull request #1001 from gaul/missing-braces
Add missing braces
2019-04-08 23:55:02 +09:00
b1bade37d8 Merge pull request #1000 from ggtakec/master
Modified to discard the _netdev option, etc.
2019-04-08 23:43:02 +09:00
005c186e1b Merge pull request #998 from gaul/doc/compilation
Move compilation instructions to separate file
2019-04-08 23:32:54 +09:00
6f8ecb28c5 Correctly calculate MD5 hash for SSE-C keys
Previously s3fs calculated the strlen of a binary input instead of
using the stored length.  This yielded IO errors when interacting with
SSE-encrypted objects.  Fixes #696.
2019-04-08 21:49:42 +09:00
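
Hashing the stored bytes with their explicit length, sketched with OpenSSL's MD5(); the helper name is illustrative, and the actual SSE-C header value is additionally base64-encoded:

```cpp
#include <openssl/md5.h>
#include <string>

// Hash the stored key bytes with their explicit length.  Using
// strlen() here would stop at the first NUL byte of a binary key and
// produce the wrong MD5, which is what broke SSE-C requests.
std::string ssec_key_md5(const std::string& raw_key) {
    unsigned char md[MD5_DIGEST_LENGTH];
    MD5(reinterpret_cast<const unsigned char*>(raw_key.data()),
        raw_key.size(), md);
    return std::string(reinterpret_cast<char*>(md), MD5_DIGEST_LENGTH);
}
```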
4c28eb2a78 Move compilation instructions to separate file
Most users should use distribution packages and expert users can
consult this file.  References #991.
2019-04-08 15:56:46 +09:00
042332bcec Add missing braces
Found via clang-tidy.  Also fix errant indentation.
2019-04-07 23:12:27 +09:00
071cd0f849 Modified to discard the _netdev option, etc. 2019-04-07 11:51:55 +00:00
d7bb834bcb Merge pull request #999 from gaul/malloc
Prefer new over malloc
2019-04-07 20:21:58 +09:00
9b437fc1fe Merge pull request #997 from gaul/doc/http
Prefer HTTPS links in documentation where possible
2019-04-07 19:44:38 +09:00
6f6a67807b Prefer new over malloc
The former cannot return NULL and will allow use of scoped pointers.
2019-04-07 16:54:24 +09:00
e5785d4690 Prefer HTTPS links in documentation where possible
This also corrects a test URL which was HTTPS in practice.
2019-04-05 17:31:06 +09:00
a4ce54d615 Merge pull request #995 from jaygooby/master
Work with filenames that include spaces
2019-04-02 23:17:39 +09:00
ddbcec5c96 Work with filenames that include spaces
• Simplified the stat generation line (single exec using only stat)
• Quote variables so that the cache directory can also include spaces
• while/read loop to handle spaces in cached files
  (IFS was causing problems when all the files were saved into a single variable)
2019-04-02 10:35:25 +01:00
7cbb4c958b Merge pull request #994 from gaul/doc/nits
Add commas to see also
2019-03-31 07:27:44 +09:00
6c5adbb9af Add commas to see also 2019-03-30 23:25:58 +09:00
4db6e1a10a Merge pull request #993 from ggtakec/test
Fixed a bug where metadata was overwritten when updating directory stats
2019-03-30 01:18:48 +09:00
ea517c80a4 Fixed a bug where metadata was overwritten when updating directory stats 2019-03-29 15:30:30 +00:00
9f6ed6c08e Merge pull request #992 from ggtakec/master
Added see also section to man page
2019-03-29 09:23:18 +09:00
b1ddb483a4 Added see also section to man page 2019-03-28 23:59:19 +00:00
17352ef4fd Merge pull request #987 from ggtakec/master
Support CURLoption values undefined in the libcurl used at build time
2019-03-22 20:34:48 +09:00
71766039ff Support CURLoption values undefined in the libcurl used at build time 2019-03-22 10:47:42 +00:00
c607c9be58 Merge pull request #985 from ggtakec/master
Fixed configure.ac misuse of AC_CHECK_FILE
2019-03-22 15:19:04 +09:00
df604e50fb Fixed configure.ac misuse of AC_CHECK_FILE 2019-03-22 05:22:23 +00:00
876662ff89 Merge pull request #982 from gaul/cppcheck/find-first-of
Simplify string comparison
2019-03-22 13:59:45 +09:00
058706014b Simplify string comparison
Found via newer cppcheck.
2019-03-14 22:19:33 +09:00
99ec09f13a Merge pull request #978 from ggtakec/master
Updated ChangeLog and configure.ac etc for release 1.85
2019-03-11 21:32:25 +09:00
4a011d87e0 Updated ChangeLog and configure.ac etc for release 1.85 2019-03-11 11:53:57 +00:00
c6edc2cd8f Merge pull request #977 from gaul/readme/homebrew
Make macOS instructions consistent with others
2019-03-11 20:26:26 +09:00
cc196bfdf0 Make macOS instructions consistent with others 2019-03-11 14:36:36 +09:00
895d5006bb Merge pull request #975 from ggtakec/master
Fixed ref-count when error occurred.
2019-03-10 15:09:09 +09:00
62dcda6a56 Fixed ref-count when error occurred. 2019-03-10 06:04:19 +00:00
cbf072bc55 Merge pull request #967 from gaul/reference-count
Increase FdEntity reference count when returning
2019-03-10 14:27:10 +09:00
1b4d2a32d2 Merge pull request #974 from ggtakec/master
Changed the description order of man page options
2019-03-10 13:43:51 +09:00
b71c90bbe1 Changed the description order of man page options 2019-03-10 04:32:10 +00:00
80344aafd3 Merge pull request #972 from gaul/readme/amazon-linux
Add instructions for Amazon Linux
2019-03-10 13:11:04 +09:00
b5ca400500 Merge pull request #970 from gaul/nocopyapi
Remove source file from stat cache during rename
2019-03-10 12:56:04 +09:00
2e89439120 Merge pull request #969 from gaul/typo/lazy
Fix lazy typo
2019-03-10 12:40:59 +09:00
555410386c Add instructions for Amazon Linux 2019-03-09 22:25:06 +09:00
08b132ddb9 Remove source file from stat cache during rename
This addresses failures with test_rename_before_close when using
nocopyapi.  Note that test_update_time still fails.
2019-03-09 17:53:35 +09:00
1e86cc643d Fix lazy typo 2019-03-09 17:46:02 +09:00
f53503438c Increase FdEntity reference count when returning
Previously s3fs had a race condition where one client could delete
FdEntity that another client was using.  Add a simple concurrent test
which previously failed but now succeeds.  Fixes #964.
2019-03-09 15:55:04 +09:00
0d43d070cc Merge pull request #965 from ggtakec/fix_multi_x
Improvement of curl session pool for multipart
2019-02-27 22:44:25 +09:00
0791fdca2a Merge pull request #960 from kristjanvalur/wtf8
Wtf8
2019-02-27 21:21:38 +09:00
6e8678d5e3 remove lines that were accidentally duplicated 2019-02-25 16:02:01 +00:00
10d9f75366 Improvement of curl session pool for multipart
2019-02-25 14:46:24 +00:00
77993e607e Merge pull request #961 from gaul/cppcheck
Work around cppcheck warnings
2019-02-24 11:24:28 +09:00
74d8671e54 Work around cppcheck warnings
Follows on to eeb839242b.
2019-02-20 21:55:41 +09:00
4c41eac29c fix documentation and man page 2019-02-20 11:24:29 +00:00
3c97c1b251 merged main 2019-02-19 10:53:00 +00:00
84c671a81a fix indentation 2019-02-19 10:37:43 +00:00
f336bdebcc add command line flag and documentation 2019-02-19 10:32:37 +00:00
e5b8377202 fix comments and code 2019-02-19 10:32:10 +00:00
4f42f4ab0c Enable s3fs encoding and decoding in the fuse interface 2019-02-18 13:36:48 +00:00
11b385820d more robust wtf8 encoding 2019-02-18 12:27:44 +00:00
f1a9eaee54 Merge pull request #958 from gaul/readme/inotify
Document lack of inotify support
2019-02-17 23:28:24 +09:00
ffee8d5f39 Merge pull request #959 from ggtakec/master
Fixed code for latest cppcheck error on OSX
2019-02-17 23:19:17 +09:00
eeb839242b Fixed code for latest cppcheck error on OSX 2019-02-17 13:59:11 +00:00
f7760976a5 Document lack of inotify support
References #45.
2019-02-17 19:45:08 +09:00
ca2d1d873d Adding utility functions to convert invalid utf8 to wtf8 encoding
This deals with Windows clients that pass in cp1252 as if it were
UTF-8
2019-02-15 15:57:03 +00:00
951761ee2c Merge pull request #951 from ggtakec/master
Added a non-interactive option to utility mode
2019-02-11 03:03:01 +09:00
231fd001d9 Added a non-interactive option to utility mode
Deleted interactive mode in utility mode
2019-02-10 17:36:18 +00:00
e00afa8128 Merge pull request #957 from gaul/deps/s3proxy-1.6.1
Upgrade to S3Proxy 1.6.1
2019-02-10 23:32:13 +09:00
e9297f39ea Upgrade to S3Proxy 1.6.1
Notably, this improves performance when listing objects using the
filesystem backend, allowing new tests like expanding large tar files.
Release notes:

https://github.com/gaul/s3proxy/releases/tag/s3proxy-1.6.1
2019-02-06 17:06:43 -08:00
314dc5a398 Merge pull request #955 from gaul/clang-tidy/misc
Address miscellaneous clang-tidy warnings
2019-02-06 21:21:54 +09:00
e07cb020cc Merge pull request #954 from gaul/clear-iter
Clear containers instead of individual erases
2019-02-06 21:12:36 +09:00
9f79b9e0da Merge pull request #953 from gaul/readme/s3ql
Update s3ql link
2019-02-06 21:03:28 +09:00
e87e40b3b4 Address miscellaneous clang-tidy warnings
Also add configuration with suppressions.
2019-02-04 22:58:40 -08:00
f0f95478ec Update s3ql link 2019-02-04 21:03:46 -08:00
bd66b57ad3 Merge pull request #952 from gaul/multipart/failed
Automatically abort failed multipart requests
2019-02-04 22:19:27 +09:00
a1d3ff9766 Automatically abort failed multipart requests
This can avoid dangling parts.  However, many transfers fail due to
network errors so we still need other mechanisms to handle these
parts.
2019-02-03 10:29:20 -08:00
7f61a947c2 Merge pull request #950 from ggtakec/master
Added S3FS_MALLOC_TRIM build switch
2019-02-03 17:23:09 +09:00
4d0bef1e90 Clear containers instead of individual erases
This has O(n) runtime instead of O(n log n).
2019-02-02 23:58:43 -08:00
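
The difference, sketched on a std::map (illustrative types):

```cpp
#include <map>
#include <string>

typedef std::map<std::string, std::string> cache_t;

// Before: one tree search per key, O(n log n) in total.
void reset_slow(cache_t& cache) {
    while (!cache.empty()) {
        cache.erase(cache.begin()->first);  // erase-by-key re-searches
    }
}

// After: a single tree walk, O(n).
void reset_fast(cache_t& cache) {
    cache.clear();
}
```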
960823fb40 Added S3FS_MALLOC_TRIM build switch 2019-02-03 07:36:17 +00:00
c04e8e7a9d Merge pull request #949 from gaul/503-slow-down
Implement exponential backoff for 503
2019-02-03 15:19:28 +09:00
fb6debd986 Merge pull request #948 from gaul/too-many-parts
Add logging for too many parts
2019-02-03 14:55:54 +09:00
d8185a25aa Merge pull request #946 from gaul/async/completed-tids
Simplify async request completion code
2019-02-03 14:08:49 +09:00
53337a0a28 Merge pull request #944 from gaul/utility-mode
Repair utility mode
2019-02-03 13:59:56 +09:00
ae51556d04 Merge pull request #943 from gaul/hard-link
Return not supported when hard linking
2019-02-03 12:45:21 +09:00
b3de9195a7 Merge pull request #942 from gaul/c++03
Ensure s3fs compiles with C++03
2019-02-03 12:35:49 +09:00
055ecf6ea7 Merge pull request #940 from gaul/parallel-multipart-copy
Copy parts in parallel
2019-02-03 11:44:15 +09:00
c603680e02 Merge pull request #939 from gaul/stringstream-specific
Prefer specific [io]stringstream where possible
2019-02-03 11:06:57 +09:00
814aadd7e3 Merge pull request #938 from gaul/clang-tidy/redundant-void
Remove unneeded void parameter
2019-02-03 10:57:56 +09:00
dce63d1529 Merge pull request #937 from gaul/malloc-trim
Disable malloc_trim
2019-02-03 10:48:15 +09:00
8ff05d8e38 Implement exponential backoff for 503
Amazon returns SlowDown when overloaded.  Also return ENOTSUP for 501
and immediately return EIO for 500 instead of retrying.  Fixes #603.
2019-02-02 00:03:50 -08:00
dfa84b82a8 Add logging for too many parts
References #610.
2019-02-01 19:34:26 -08:00
6ac8618381 Ensure s3fs compiles with C++03 2019-02-01 08:33:59 -08:00
8c527c3616 Simplify async request completion code
Workers now notify the master thread when they complete, unifying the
Linux and macOS code paths.  This also avoids excessive
pthread_tryjoin_np calls.  Follows on to
88cd8feb05.
2019-01-31 22:55:23 -08:00
54a074647e Repair utility mode
This deinitialized S3fsCurl twice and incorrectly calculated V4
signatures.
2019-01-31 18:45:39 -08:00
c5ebf5d328 Copy parts in parallel
S3 can copy multipart much faster than single part due to IO
parallelization.  Renaming a 4 GB file reduces from 72 to 20 seconds
with bigger gains with larger files.
2019-01-31 10:21:39 -08:00
43c6ef560e Return not supported when hard linking
This is more correct than permission denied.
2019-01-30 16:43:04 -08:00
3076abc744 Disable malloc_trim
This avoids walking the entire heap multiple times for complex
operations like readdir.  This does not entirely eliminate the
observed performance regression but does dramatically reduce s3fs CPU
usage.  References #935.  Fixes #936.
2019-01-29 15:29:07 -08:00
07636c8a8d Prefer specific [io]stringstream where possible
These better communicate intent and are slightly more efficient.
2019-01-29 10:44:33 -08:00
35d55ee513 Remove unneeded void parameter
This is implicit in C++.  Found and fixed via clang-tidy.
2019-01-28 23:22:27 -08:00
a442e843be Merge pull request #934 from ggtakec/master
Checked and corrected all typos
2019-01-27 21:33:16 +09:00
c0cf90cf8b Checked and corrected all typos 2019-01-27 12:04:29 +00:00
3b1cc3b197 Merge pull request #933 from gaul/cache/remove-mirror-path
Remove mirror path when deleting cache
2019-01-27 16:15:49 +09:00
a0c1f30ae7 Merge pull request #932 from gaul/autolock
Prefer AutoLock for synchronization
2019-01-27 15:59:18 +09:00
8822a86709 Merge pull request #931 from gaul/doc/typo
Correct sigv2 typo
2019-01-27 15:48:24 +09:00
98f397de0e Merge pull request #930 from gaul/doc/md5-multipart
Correct enable_content_md5 docs
2019-01-27 15:36:56 +09:00
fd4d23f8f7 Merge pull request #926 from kzidane/master
Accept paths with : in them
2019-01-27 15:23:24 +09:00
4820f0a42b Merge pull request #925 from gaul/clang-tidy/delete-null
Remove redundant null checks before delete
2019-01-27 15:15:31 +09:00
807a618cf7 Merge pull request #924 from gaul/clang-tidy/empty
Prefer empty over size checks
2019-01-27 15:04:52 +09:00
a93e500b44 Remove mirror path when deleting cache
Fixes #827.
2019-01-25 18:10:03 -08:00
92d3114584 Prefer AutoLock for synchronization
This simplifies the code and fixes an issue with unlocked access.
Also use a recursive lock for StatCache to avoid races between
lock..unlock..lock sequences.
2019-01-25 15:28:41 -08:00
5062d6fbd9 Correct sigv2 typo 2019-01-25 14:30:30 -08:00
7d14ebaf09 Correct enable_content_md5 docs
Both S3fsCurl::PutRequest and S3fsCurl::UploadMultipartPostSetup can
calculate and send Content-MD5 to the server.  Remove spurious comment
about large files and make man page and help consistent.
References #929.
2019-01-25 14:27:52 -08:00
cd794a6985 Accept paths with : in them 2019-01-23 14:56:25 -05:00
84b421d6ef Prefer empty over size checks
Found and fixed via clang-tidy.
2019-01-23 11:30:28 -08:00
8316da5bbe Remove redundant null checks before delete
Found by clang-tidy.
2019-01-23 11:25:25 -08:00
fa287aeef7 Merge pull request #923 from ggtakec/master
Reverted automatic region change and changed messages
2019-01-23 23:00:31 +09:00
caaf4cac55 Reverted #912 (automatic region change) and added a message 2019-01-23 13:23:03 +00:00
010276ceab Merge pull request #921 from gaul/clang-tidy/redundant-string-init
Remove redundant string initializations
2019-01-23 19:44:59 +09:00
f219817eb3 Merge pull request #920 from gaul/clang-tidy/string-copy
Remove unnecessary string copies
2019-01-23 19:30:55 +09:00
d487348d21 Merge pull request #919 from gaul/test/mv-nonempty-dir
Add test for mv non-empty directory
2019-01-23 19:21:12 +09:00
eb0b29708f Merge pull request #918 from gaul/overwrite-file-range
Load tail range during overwrite
2019-01-23 19:12:23 +09:00
877842a720 Merge pull request #917 from gaul/readme/aws-cli
Reference better-known AWS CLI for compatibility
2019-01-23 18:38:08 +09:00
1fc25e8c3f Remove redundant string initializations
Found and fixed via clang-tidy.
2019-01-22 23:16:37 -08:00
61ecafd426 Remove unnecessary string copies
Found via clang-tidy.
2019-01-22 23:09:37 -08:00
79bd3441eb Add test for mv non-empty directory 2019-01-22 22:13:17 -08:00
5f5da4b2cb Load tail range during overwrite
Previously s3fs experienced data loss when writing to the middle of a
file.  Corrupt files would have the expected data from 0..offset+size
but unexpected NUL bytes from offset+size..EOF.  References #808.
2019-01-22 22:02:40 -08:00
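
A sketch of the fix's shape; load_range is a hypothetical stand-in for the real page-loading call:

```cpp
#include <sys/types.h>

// Hypothetical stand-in for the real page-loading call.
static void load_range(off_t /*start*/, off_t /*length*/) {}

// Before writing [offset, offset+size), make sure the tail
// [offset+size, EOF) is present locally; otherwise flushing the sparse
// cache file uploads NUL bytes past the write, corrupting the object.
void prepare_overwrite(off_t offset, off_t size, off_t file_size) {
    off_t tail_start = offset + size;
    if (tail_start < file_size) {
        load_range(tail_start, file_size - tail_start);
    }
}
```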
dede19d8c0 Reference better-known AWS CLI for compatibility 2019-01-21 17:32:57 -08:00
fada95f58e Merge pull request #914 from gaul/readdir/head-of-line
Issue readdir HEAD requests without batching
2019-01-21 22:10:50 +09:00
014b8c5982 Merge pull request #913 from gaul/assert
Prefer abort over assert(false)
2019-01-21 21:56:41 +09:00
46d79c5bc2 Issue readdir HEAD requests without batching
Previously s3fs would issue a batch of HEAD requests and wait for all
to succeed before issuing the next batch.  Now it issues the first
batch and only waits for a single call to succeed before issuing the
next call.  This can improve performance when one call lags due to
network errors.  I measured 25% improvement with the same level of
parallelism.  This commit also reparents parallelism knobs for
consistency.  Follows on to 88cd8feb05.
Fixes #223.
2019-01-20 18:07:22 -08:00
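
A sliding-window sketch of the idea in C++ (illustrative; the real code drives curl handles, not a thread per request):

```cpp
#include <condition_variable>
#include <functional>
#include <mutex>
#include <thread>
#include <vector>

// Keep `parallelism` requests in flight and launch the next one as
// soon as any single request finishes, instead of waiting for a whole
// batch to drain.
void head_all(const std::vector<std::function<void()>>& requests,
              unsigned parallelism) {
    std::mutex m;
    std::condition_variable cv;
    unsigned in_flight = 0;
    std::vector<std::thread> threads;
    for (const auto& req : requests) {
        std::unique_lock<std::mutex> lk(m);
        cv.wait(lk, [&] { return in_flight < parallelism; });
        ++in_flight;
        threads.emplace_back([&, req] {
            req();
            std::lock_guard<std::mutex> g(m);
            --in_flight;
            cv.notify_one();
        });
    }
    for (auto& t : threads) t.join();
}
```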
40ba3b44a1 Prefer abort over assert(false)
The compiler can remove the latter when compiled with NDEBUG which may
cause unintended control flow.
2019-01-20 12:30:27 -08:00
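
The hazard in miniature (illustrative function):

```cpp
#include <cstdlib>

// Under -DNDEBUG, assert(false) expands to nothing and control falls
// through to whatever follows; abort() terminates unconditionally, so
// "unreachable" branches stay unreachable in release builds.
int decode_state(int state) {
    switch (state) {
        case 0: return 10;
        case 1: return 20;
        default:
            abort();  // previously assert(false), a no-op under NDEBUG
    }
}
```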
beadf95975 Merge pull request #912 from ggtakec/master
Automatic region change made possible other than us-east-1(default)
2019-01-20 20:13:07 +09:00
2887f8916b Automatic region change made possible other than us-east-1(default) 2019-01-20 10:51:49 +00:00
0c9a8932f7 Merge pull request #911 from ggtakec/master
Added detailed error message for HTTP 301/307 status
2019-01-20 19:31:04 +09:00
ac72431195 Added detailed error message for HTTP 301/307 status 2019-01-20 10:07:58 +00:00
2a7877beff Merge pull request #910 from ggtakec/master
Added a missing extension to .gitignore, and formatted dot files
2019-01-20 18:38:37 +09:00
7a56459103 Added a missing extension to .gitignore, and formatted dot files 2019-01-20 09:18:40 +00:00
5292fa74d1 Merge pull request #909 from ggtakec/master
Ignore characters after the decimal point of the floating-point value in x-amz-meta-mtime
2019-01-20 17:58:55 +09:00
f2184e34dd Ignore characters after the decimal point of the floating-point value in x-amz-meta-mtime 2019-01-20 08:28:06 +00:00
1d4867830b Merge pull request #908 from ggtakec/master
Added an error message for HTTP 301 status
2019-01-20 16:59:43 +09:00
36a4903843 Added an error message for HTTP 301 status 2019-01-20 07:17:40 +00:00
c83a3e67c9 Merge pull request #885 from LutzFinsterle2019/master
Update s3fs_util.cpp for Nextcloud content-type compatibility
2019-01-20 15:32:58 +09:00
05014c49c8 Merge pull request #906 from gaul/doc/https
Prefer HTTPS links where possible
2019-01-20 15:10:51 +09:00
aa69107165 Merge pull request #905 from gaul/clang-tidy/redundant
Fix comparison in s3fs_strtoofft
2019-01-20 15:01:40 +09:00
d373b0eca3 Merge pull request #904 from gaul/clang-tidy/c-str
Remove unnecessary calls to std::string::c_str
2019-01-20 14:13:46 +09:00
6aa40b2747 Merge pull request #903 from gaul/clang-tidy/find_char
Prefer find(char) over find(const char *)
2019-01-20 14:05:06 +09:00
34c3bfe408 Merge pull request #902 from gaul/clang-tidy/pass-by-value
Avoid pass-by-value when not necessary
2019-01-20 13:45:44 +09:00
6ac56e722d Merge pull request #901 from gaul/clang-tidy/leaks
Plug memory leaks
2019-01-20 12:41:08 +09:00
61dc7f0a70 Merge pull request #900 from gaul/leak
Plug memory leak
2019-01-20 12:30:31 +09:00
9f000957dd Merge pull request #899 from gaul/response-code
Tighten up HTTP response code check
2019-01-20 11:16:23 +09:00
b2141313e2 Merge pull request #898 from gaul/rename-before-close
Flush file before renaming
2019-01-20 10:13:20 +09:00
aa9bd1fa3c Prefer HTTPS links where possible
Fix a few stale links as well.
2019-01-18 11:09:08 -08:00
5a2dc03a1c Fix comparison in s3fs_strtoofft
Also backfill unit tests.  Document limitations.  Found via
clang-tidy.
2019-01-17 22:59:25 -08:00
508fafbe62 Remove unnecessary calls to std::string::c_str
Found via clang-tidy.
2019-01-17 22:05:16 -08:00
e29548178b Prefer find(char) over find(const char *)
The former can be faster.  Found via clang-tidy.
2019-01-17 20:24:24 -08:00
ab2f36f202 Plug memory leaks
Found via clang-tidy.
2019-01-17 18:54:34 -08:00
b8c9fcfd70 Avoid pass-by-value when not necessary
This requires unnecessary memcpy.  Found via clang-tidy.
2019-01-17 18:22:11 -08:00
58ce544e83 Plug memory leak
Previously this appended to an empty list, zeroed the list, then
appended to the list.  Instead zero the list first and then append.
This also enables sending Content-MD5 which can eagerly detect
transmission errors.  Found via Valgrind.
2019-01-17 16:24:34 -08:00
e98ce36301 Tighten up HTTP response code check
Previously s3fs considered 3xx codes as successful.  When writing an
object to a misconfigured region, s3fs did not propagate the 301 as a
write error to the client.  References #693.
2019-01-17 15:04:59 -08:00
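
The tightened check reduces to the following (illustrative):

```cpp
// Only 2xx counts as success; a 301 from a misconfigured region must
// surface as an error to the caller instead of being treated as OK.
bool is_success(long http_status) {
    return 200 <= http_status && http_status < 300;
}
```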
6401b4ae92 Flush file before renaming
Previously s3fs could copy the zero-byte stub object without including
any pending writes.  Fixes #145.
2019-01-17 12:05:10 -08:00
25b49e1a2e Merge pull request #894 from gaul/default-mode
Default uid/gid/mode when object lacks permissions
2019-01-16 18:48:54 +09:00
c7def35b54 Merge pull request #895 from gaul/bucket-name-dot
Emit more friendly error for buckets with dots
2019-01-16 18:39:47 +09:00
ddba1c63c5 Merge pull request #893 from gaul/ctime
Store and retrieve file change time
2019-01-16 18:30:15 +09:00
c512516e14 Emit more friendly error for buckets with dots
These fail SSL certificate checks because the *.s3.amazonaws.com wildcard covers only a single DNS label; a bucket named my.bucket yields the host my.bucket.s3.amazonaws.com, which the certificate does not match.
Fixes #284.
2019-01-14 18:47:36 -08:00
2c43b1e12b Store and retrieve file change time
This introduces a new header with the change time; existing objects
will report modification time.  Fixes #771.
2019-01-14 10:05:11 -08:00
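A sketch of the fallback logic; the header names are assumptions, mirroring the x-amz-meta-mtime convention seen elsewhere in this log:
```
#include <cstdlib>
#include <ctime>
#include <map>
#include <string>

time_t get_ctime(const std::map<std::string, std::string>& meta) {
    std::map<std::string, std::string>::const_iterator it = meta.find("x-amz-meta-ctime");
    if (it == meta.end()) {
        it = meta.find("x-amz-meta-mtime");  // existing objects: report mtime
    }
    return it == meta.end() ? 0 : static_cast<time_t>(strtoll(it->second.c_str(), NULL, 10));
}
```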
b68d97c6bf Merge pull request #892 from gaul/test/xattr
Repair xattr tests
2019-01-14 21:26:46 +09:00
f1757e4343 Merge pull request #888 from gaul/readme
Add Server Fault to FAQs
2019-01-14 21:17:06 +09:00
e2d5641d99 Default uid/gid/mode when object lacks permissions
This addresses a common use case when interacting with objects from
both s3fs and other S3 tools.  Fixes #890.
2019-01-13 21:57:23 -08:00
523fe1e309 Repair xattr tests
These did not run due to missing use_xattr flag and Travis
misconfiguration.
2019-01-13 18:54:16 -08:00
c985b5e4d0 Corrected Comment to C++ style 2019-01-12 10:19:48 +01:00
786f1a8fc7 Add Server Fault to FAQs
Also correctly format Stack Overflow.
2019-01-10 11:17:32 -08:00
18cb2e2662 Update s3fs_util.cpp
Sorry for answering late, have been busy lately.
The comment is: "Nextcloud stores Directory objects with this mime type when mounting a Bucket as external Storage"
2019-01-10 07:44:36 +01:00
743c706b0a Update s3fs_util.cpp
Nextcloud Compatibility in directory mime-types
2019-01-07 07:41:27 +01:00
4ed0e5f35a Merge pull request #882 from earlchew/issue-817
[curl] Assume long encryption keys are base64 encoded
2019-01-06 17:29:04 +09:00
fd6b37d3da Merge pull request #877 from gaul/aws/credentials
Check arguments and environment before .aws/creds
2019-01-06 17:03:11 +09:00
56e24de0d4 Merge pull request #870 from gaul/typos
Correct typos in command-line parsing
2019-01-06 16:21:46 +09:00
2780043a7d Merge pull request #856 from gaul/doc/icon
Add icon for s3fs
2019-01-06 15:54:24 +09:00
54c9e48bb7 Merge pull request #874 from gaul/cppcheck-1.86
Address cppcheck 1.86 errors
2019-01-06 15:20:28 +09:00
ed5795eead [curl] Assume long encryption keys are base64 encoded
Correct tabs and whitespace.

Signed-off-by: Earl Chew <earl_chew@yahoo.com>
2019-01-05 21:08:41 +00:00
3d225163f8 Correct typos in command-line parsing
Also fix stray macOS references.  References #869.
2019-01-02 14:40:57 -08:00
0569cec3ea Check arguments and environment before .aws/creds
Fixes #857.  Fixes #864.
2018-12-20 15:51:32 -08:00
a2f8ac535e Address cppcheck 1.86 errors
Lifetime, shadowing, and unused variables.  Found via the Travis macOS
builder.
2018-12-20 14:56:31 -08:00
29355d75b0 Add icon for s3fs
This is suitable for the s3fs-fuse organization.
2018-12-15 17:12:28 -08:00
d9e89deef6 Merge pull request #865 from orozery/multihead_warning_check
fix multihead warning check
2018-11-29 20:53:47 +09:00
6b051eac47 Merge pull request #866 from Basavaraju013/master
Multi-arch support for ppc64le
2018-11-29 20:37:02 +09:00
da997de918 Merge pull request #861 from mcgitty/patch-1
Add 'profile' option to command line help.
2018-11-29 20:15:01 +09:00
d97094fb8d Multi-arch support for ppc64le 2018-11-28 05:15:09 -06:00
b91fc5409e fix multihead warning check 2018-11-28 09:54:02 +02:00
3c970646d1 Add 'profile' option to command line help. 2018-11-19 08:26:23 -08:00
a92668ae78 Merge pull request #859 from gaul/upload-remove-batching
Upload S3 parts without batching
2018-11-18 22:27:20 +09:00
88cd8feb05 Upload S3 parts without batching
Previously s3fs would issue a batch of uploads and wait for all to
succeed before issuing the next batch.  Now it issues the first batch
and only waits for a single part to succeed before uploading the next
part.  This can improve performance when one part lags due to network
errors.  Fixes #183.
2018-11-16 18:32:38 -08:00
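A conceptual sketch of the scheduling change, not s3fs's actual code; upload_part stands in for the real network call:
```
#include <future>
#include <vector>

static bool upload_part(int part_number) { return part_number >= 0; }  // stand-in

int main() {
    const int total_parts = 10, parallel_count = 4;
    std::vector<std::future<bool> > in_flight;
    int next_part = 0;
    while (next_part < total_parts || !in_flight.empty()) {
        // keep parallel_count parts in flight at all times
        while (static_cast<int>(in_flight.size()) < parallel_count && next_part < total_parts) {
            in_flight.push_back(std::async(std::launch::async, upload_part, next_part++));
        }
        in_flight.front().get();            // wait for one part, not a whole batch
        in_flight.erase(in_flight.begin());
    }
    return 0;
}
```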
91c16f826a Merge pull request #855 from gaul/readme/stackoverflow
Include StackOverflow in FAQs
2018-11-14 22:32:10 +09:00
d4d60ff315 Include StackOverflow in FAQs 2018-11-13 22:19:17 -08:00
e8033f96de Merge pull request #853 from gaul/readme-tilde
Replace ~ with ${HOME} in examples
2018-11-11 11:47:44 +09:00
5fba542a29 Merge pull request #852 from gaul/aws-credentials-file
Allow credentials from ${HOME}/.aws/credentials
2018-11-11 11:37:38 +09:00
44de3ffa05 Merge pull request #851 from gaul/list-object-max-keys
Correctly compare list_object_max_keys
2018-11-11 11:26:31 +09:00
2efa6df028 Merge pull request #849 from gaul/readme
Correct typo
2018-11-11 11:17:05 +09:00
9e530c86ae Allow credentials from ${HOME}/.aws/credentials
This matches the configuration from popular tools like AWS CLI and
allows multiple profile names via -o profile=name.  The existing
credential mechanisms continue to work.  Fixes #822.
2018-11-04 17:47:07 -08:00
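The file uses the same layout as the AWS CLI; a hypothetical example with a second profile named backup (all keys are AWS's documentation samples, and names are illustrative):
```
# ${HOME}/.aws/credentials
[default]
aws_access_key_id = AKIAIOSFODNN7EXAMPLE
aws_secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY

[backup]
aws_access_key_id = AKIAI44QH8DHBEXAMPLE
aws_secret_access_key = je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY
```
A bucket would then be mounted with the named profile, e.g. `s3fs mybucket /mnt/s3 -o profile=backup` (paths here are illustrative).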
95857733a1 Replace ~ with ${HOME} in examples
Several of these do not work since the shell does not replace ~ in the
middle of a token, e.g., -o passwd=~/.passwd .  Replace all of them
for consistency.  Fixes #836.
2018-11-04 17:45:16 -08:00
664f910083 Correctly compare list_object_max_keys
Previously this did not allow an argument.  Fixes #843.
References #793.
2018-11-04 10:49:35 -08:00
735e4b0848 Correct typo 2018-10-29 21:42:48 -07:00
e8d76a6f58 Merge pull request #840 from juliogonzalez/macOS
Replace all mentions to MacOS X to macOS
2018-10-28 20:33:51 +09:00
0a6926be54 Merge pull request #835 from juliogonzalez/generic-compilation
Make the compilation instructions generic
2018-10-28 20:00:21 +09:00
830a971bde Replace all mentions to MacOS X to macOS 2018-10-14 23:23:19 +02:00
4779d14d7d Make the compilation instructions generic 2018-10-08 22:27:01 +02:00
8929a27a24 Merge pull request #834 from juliogonzalez/issue-improvements
Improve template for issues
2018-10-09 00:37:20 +09:00
eea624c171 Merge pull request #833 from juliogonzalez/doc
New installation instructions for Fedora >= 27 and CentOS7
2018-10-09 00:05:36 +09:00
cdaf4a9674 Merge branch 'master' into doc 2018-10-08 23:56:20 +09:00
6fe92d5ed6 Merge pull request #832 from gaul/hex
Simplify hex conversion
2018-10-08 23:03:42 +09:00
8649a68766 Merge pull request #831 from JoohnSF/onezone_ia_support
Add support for storage class ONEZONE_IA.
2018-10-08 22:57:12 +09:00
af005b6e5e Merge pull request #826 from juliogonzalez/centos-setup
For RPM distributions fuse-libs is enough
2018-10-08 22:50:29 +09:00
b19d2ae78f Improve template for issues 2018-10-07 01:20:10 +02:00
5634f9bdcd New installation instructions for Fedora >= 27 and CentOS7 2018-10-07 00:52:45 +02:00
c703fa15c0 Simplify hex conversion
Addresses GCC 8 warning:

common_auth.cpp: In function ‘std::__cxx11::string s3fs_sha256sum(int, off_t, ssize_t)’:
common_auth.cpp:84:12: warning: ‘char* strncat(char*, const char*, size_t)’ output may be truncated copying 2 bytes from a string of length 2 [-Wstringop-truncation]
    strncat(sha256, hexbuf, 2);
2018-10-01 19:08:27 -07:00
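A sketch of an snprintf-based conversion that sidesteps the warning (helper name illustrative):
```
#include <cstddef>
#include <cstdio>
#include <string>

std::string to_hex(const unsigned char* input, size_t length) {
    std::string hex;
    char part[3];
    for (size_t i = 0; i < length; i++) {
        // always exactly two hex digits per byte, so nothing can truncate
        snprintf(part, sizeof(part), "%02x", static_cast<unsigned int>(input[i]));
        hex += part;
    }
    return hex;
}
```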
d9c106cfde Add support for storage class ONEZONE_IA. 2018-09-30 14:27:31 +02:00
203f78fdae For RPM distributions fuse-libs is enough 2018-09-23 19:35:17 +02:00
c5af62b023 Merge pull request #820 from gaul/big-writes
Enable big writes if capable
2018-09-17 17:24:06 +09:00
dcd70daf48 Merge pull request #819 from soulprovidr/master
#691: Made instructions for creating password file more obvious.
2018-09-17 17:01:53 +09:00
8263919b0e Merge pull request #812 from mapreri/typo
Fix typo s/mutliple/multiple/
2018-09-17 16:52:48 +09:00
97488e603f Merge pull request #804 from DreamFlasher/patch-2
add Backblaze B2
2018-09-17 16:43:55 +09:00
41c23adb0e [curl] Assume long encryption keys are base64 encoded
Amazon SSE-C https://tinyurl.com/ychug4cg writes:

> Use this header to provide the 256-bit, base64-encoded encryption key
> for Amazon S3 to use to encrypt or decrypt your data.

It seems likely that future keys would be as long, or longer, so
this change assumes that text longer than 32 characters is a key
that is base64 encoded. (A raw 256-bit key is exactly 32 bytes,
while its base64 encoding is 44 characters, so the two cases do
not collide.)

Signed-off-by: Earl Chew <earl_chew@yahoo.com>
2018-09-16 21:23:22 +00:00
a85183d42c Enable big writes if capable
Fixes #813.
2018-09-10 22:21:51 -07:00
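A sketch of what opting in looks like in a libfuse 2.x init callback (callback name assumed):
```
#define FUSE_USE_VERSION 26
#include <fuse.h>
#include <cstddef>

static void* init_cb(struct fuse_conn_info* conn) {
    // request big writes only when the kernel advertises the capability
    if (conn->capable & FUSE_CAP_BIG_WRITES) {
        conn->want |= FUSE_CAP_BIG_WRITES;
    }
    return NULL;
}
```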
45b67b9604 #691: Made instructions for creating password file more obvious. 2018-09-09 23:11:12 -05:00
c376efdd28 Fix typo s/mutliple/multiple/
Signed-off-by: Mattia Rizzolo <mattia@mapreri.org>
2018-08-19 00:14:57 +02:00
4c5f510207 add Backblaze B2
This is relevant because Backblaze B2 is fairly popular, so mentioning it helps s3fs.
2018-07-17 13:49:31 +02:00
06032aa661 Merge pull request #795 from ggtakec/master
Updated ChangeLog and configure.ac for release 1.84
2018-07-08 18:22:54 +09:00
e8fb2aefb3 Updated ChangeLog and configure.ac for release 1.84 2018-07-08 09:06:52 +00:00
3cb6c5e161 Merge pull request #793 from ggtakec/master
Added list_object_max_keys option based on #783 PR
2018-07-08 13:08:47 +09:00
7e0c53dfe9 Added list_object_max_keys option based on #783 PR 2018-07-08 03:49:10 +00:00
c2ca7e43b6 Merge pull request #789 from juliogonzalez/doc-opensuse-suse
Instructions for SUSE and openSUSE prebuilt packages
2018-07-08 11:42:05 +09:00
ae47d5d349 Merge pull request #786 from ambiknai/log_enhancements
Log messages for 5xx and 4xx HTTP response code
2018-07-08 11:28:54 +09:00
35d3fce7a0 Review comment: Include the error code being returned 2018-07-06 05:14:32 -04:00
4177d8bd3b Review comment: Include the error code being returned 2018-07-06 03:03:57 -04:00
ad5349a488 Changes as per review comments 2018-07-05 05:02:04 -04:00
6b57a8c1fc Instructions for SUSE and openSUSE prebuilt packages 2018-07-05 10:23:26 +02:00
92a4034c5e Log messages for 5xx and 4xx HTTP response code 2018-07-04 03:50:45 -04:00
3e4002df0d Merge pull request #780 from wjt/initialize-libgcry
gnutls_auth: initialize libgcrypt
2018-06-24 12:48:08 +09:00
1b9ec7f4fc Merge pull request #774 from nkkashyap/master
Option for IAM authentication endpoint
2018-06-24 12:36:23 +09:00
4a7c4a9e9d Merge pull request #781 from ggtakec/master
Fixed an error by cppcheck on OSX
2018-06-24 12:22:35 +09:00
0d3fb0658a Fixed an error by cppcheck on OSX 2018-06-24 02:38:59 +00:00
73cf2ba95d gnutls_auth: initialize libgcrypt
Without this change, the following warning appears in the syslog/journal
during startup:

  Libgcrypt warning: missing initialization - please fix the application

From the [documentation][0]:

> The function `gcry_check_version` initializes some subsystems used by
> Libgcrypt and must be invoked before any other function in the
> library.

Fixes #524, which says:

> gnutls is initialized by gnutls_global_init() function and
> gcry_check_version() function for initializing libgcry is called from
> this gnutls_global_init().

I checked the gnutls source and it hasn't contained a call to
gcry_check_version() since the libgcrypt backend was removed in 2011
(commit 8116cdc8f131edd586dad3128ae35dd744cfc32f). In any case, the
gcry_check_version() documentation continues:

> It is important that these initialization steps are not done by a
> library but by the actual application.

so it would be incorrect for a library used by s3fs to initialize
libgcrypt.

[0]: https://www.gnupg.org/documentation/manuals/gcrypt/Initializing-the-library.html
2018-06-21 20:55:00 +01:00
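The initialization sequence the manual prescribes, as a sketch (s3fs's actual call site may differ):
```
#include <gcrypt.h>
#include <cstdio>

static bool init_libgcrypt() {
    // gcry_check_version must run before any other libgcrypt function
    if (!gcry_check_version(GCRYPT_VERSION)) {
        fprintf(stderr, "libgcrypt version mismatch\n");
        return false;
    }
    gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0);
    return true;
}
```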
5a481e6a01 Option for IBM IAM auth endpoint: added missing return 2018-06-04 16:44:14 +05:30
d8e12839af Option for IBM IAM auth endpoint 2018-05-31 16:02:48 +05:30
3bf05dabea Merge pull request #769 from orozery/revert_to_async_read
Revert "enable FUSE read_sync by default"
2018-05-28 20:23:54 +09:00
d4e86a17d1 Revert "enable FUSE read_sync by default"
This reverts commit 86b0921ac4.

Conflicts:
	src/s3fs.cpp
2018-05-28 13:49:54 +03:00
6555e7ebb0 Merge pull request #768 from ggtakec/master
Fixed memory leak
2018-05-27 20:10:16 +09:00
ae9d8eb734 Fixed memory leak 2018-05-27 10:48:03 +00:00
e49d594db4 Merge pull request #766 from gaul/s3fs-python
Remove s3fs-python
2018-05-27 16:43:27 +09:00
66bb0898db Merge pull request #765 from gaul/debian
Add Debian installation instructions
2018-05-27 16:35:51 +09:00
b323312312 Remove s3fs-python
This no longer exists.
2018-05-23 16:06:41 -07:00
58e52bad4f Add Debian installation instructions 2018-05-23 16:03:02 -07:00
57b2a60172 Merge pull request #764 from orozery/remove_false_multihead_warnings
Remove false multihead warnings
2018-05-23 22:38:35 +09:00
212bbbbdf0 Merge pull request #763 from orozery/cleanup_share_after_handles
cleanup curl handles before curl share
2018-05-23 22:30:36 +09:00
a0e62b5588 Merge pull request #762 from gaul/s3proxy-1.6.0
Upgrade to S3Proxy 1.6.0
2018-05-23 22:23:33 +09:00
e9831dd772 Merge pull request #761 from gaul/ubuntu-16.04
Simplify installation for Ubuntu 16.04
2018-05-23 22:15:01 +09:00
da95afba8a Merge pull request #756 from orozery/optimize_defaults
Optimize defaults
2018-05-23 22:05:00 +09:00
0bd875eb9e remove false readdir_multi_head warnings 2018-05-22 17:10:50 +03:00
af63a42773 cleanup curl handles before curl share 2018-05-21 13:20:09 +03:00
ad9a374229 Simplify installation for Ubuntu 16.04
Also reorganize installation vs. compilation.
2018-05-16 17:40:13 -07:00
1b86e4d414 Upgrade to S3Proxy 1.6.0
Release notes:

https://github.com/gaul/s3proxy/releases/tag/s3proxy-1.6.0
https://github.com/gaul/s3proxy/releases/tag/s3proxy-1.5.5
https://github.com/gaul/s3proxy/releases/tag/s3proxy-1.5.4
2018-05-16 16:38:17 -07:00
86b0921ac4 enable FUSE read_sync by default 2018-05-06 16:10:36 +03:00
dbe98dcbd2 Merge pull request #755 from ggtakec/master
Added reset curl handle when returning to handle pool
2018-05-06 21:35:39 +09:00
4a72b60707 increase default stat cache size from 1000 to 100000 2018-05-06 15:31:07 +03:00
7a4696fc17 recommend openssl over gnutls for performance 2018-05-06 15:29:42 +03:00
e3de6ea458 Added reset curl handle when returning to handle pool 2018-05-06 12:11:53 +00:00
1db4739ed8 Merge pull request #754 from nkkashyap/master
Validate the URL format for http/https
2018-05-06 21:02:33 +09:00
25375a6b48 Validate the URL: fixed inefficient usage of find 2018-05-04 11:24:32 +05:30
ca87df7d44 Validate the URL format for http/https 2018-05-03 22:08:28 +05:30
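A sketch of the check, using compare() rather than find() as the follow-up commit above suggests, since find() scans the whole string (helper name assumed):
```
#include <string>

static bool is_supported_url(const std::string& url) {
    return url.compare(0, 7, "http://") == 0 ||
           url.compare(0, 8, "https://") == 0;
}
```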
d052dc0b9d Merge pull request #753 from cfz/master
fix xpath selector in bucket listing
2018-05-02 12:04:12 +09:00
3f542e9cf5 Merge pull request #745 from orozery/handle_mkdir_exists
don't fail mkdir when directory exists
2018-05-02 11:37:18 +09:00
04493de767 fix xpath selector in bucket listing
the original implementation in get_base_exp() depends on the order of the xml returned from the server.
in particular, when listing a directory with subdirectories, the xml document response contains more than two <Prefix> nodes (some of them are in <CommonPrefixes> nodes).
the source code arbitrarily selects the first one in the document (nodes->nodeTab[0]->xmlChildrenNode).
some s3-compatible services return the list-bucket result in a different order, leading s3fs to wrong behavior
2018-04-23 15:11:29 +08:00
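An illustration only; the element names and XPath strings here are assumptions, and the real code builds namespace-qualified expressions:
```
#include <libxml/xpath.h>

static xmlXPathObjectPtr find_top_level_prefix(xmlXPathContextPtr ctx) {
    // select only the top-level <Prefix>, not those nested under
    // <CommonPrefixes>, instead of taking nodeTab[0] of a broader match
    return xmlXPathEvalExpression(
        reinterpret_cast<const xmlChar*>("/ListBucketResult/Prefix"), ctx);
}
```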
4fdab46617 don't fail mkdir when directory exists 2018-04-08 11:13:47 +03:00
1a23b880d5 Merge pull request #739 from orozery/cleanup_failing_curl_handles
cleanup curl handle state on retries
2018-04-01 22:45:04 +09:00
b3c376afbe Merge pull request #733 from phxvyper/enhance/dupe-bucket-error
More useful error message for dupe entries in passwd file
2018-04-01 22:11:00 +09:00
adcf5754ae cleanup failing curl handles on retries 2018-03-29 13:56:08 +03:00
0863672e27 add a more helpful error message for when there are multiple entries for the same bucket in the passwd file 2018-03-13 14:37:34 -07:00
0f503ced25 Merge pull request #729 from dmgk/master
FreeBSD build fixes
2018-03-04 16:36:31 +09:00
987a166bf4 Merge pull request #726 from orozery/instance_name_logging
add an instance_name option for logging
2018-03-04 15:41:12 +09:00
57b6f0eeaf Merge pull request #724 from orozery/dont_fail_multirequest
don't fail multirequest on single thread error
2018-03-04 15:35:29 +09:00
f71a28f9b9 Merge pull request #714 from orozery/reduce_lock_contention
reduce lock contention on file open
2018-03-04 13:36:08 +09:00
45c7ea9194 Merge pull request #710 from orozery/disk_space_reservation
add disk space reservation
2018-03-04 13:27:25 +09:00
c9f4312588 FreeBSD build fixes 2018-03-02 15:58:52 -05:00
8b657eee41 add disk space reservation 2018-02-28 19:20:23 +02:00
b9c9de7f97 Merge pull request #712 from chrilith/master
Added Cygwin build options
2018-02-28 23:07:54 +09:00
e559f05326 Merge pull request #704 from vadimeremeev/patch-1
Update README.md with details about .passwd-s3fs
2018-02-28 22:22:01 +09:00
824124fedc Merge pull request #727 from ggtakec/master
Fixed Travis CI error about cppcheck - #713
2018-02-28 22:04:04 +09:00
be9d407fa0 Fixed cppcheck error on osx 2018-02-28 12:29:58 +00:00
c494e54320 Fixed cppcheck error on osx 2018-02-28 12:06:06 +00:00
b52b6f3fc5 add an instance_name option for logging 2018-02-28 09:51:35 +02:00
82c9733101 don't fail multirequest on single thread error 2018-02-26 12:06:08 +02:00
a45ff6cdaa Fixed cppcheck error and clean ^M code 2018-02-25 13:08:41 +00:00
960d45c853 Fixed cppcheck error on osx 2018-02-25 08:51:19 +00:00
246b767b64 Remove space in front of ~/.passwd-s3fs 2018-02-05 16:49:02 +07:00
0edf056e95 reduce lock contention on file open 2018-02-04 17:13:58 +02:00
88819af2d8 Added Cygwin build options 2018-02-02 15:58:10 +01:00
b048c981ad Update README.md with details about .passwd-s3fs 2017-12-22 16:20:02 +07:00
e1dafe76dd Merge pull request #701 from ggtakec/master
Updated ChangeLog and configure.ac for release 1.83
2017-12-17 16:53:49 +09:00
1a2e63ecff Updated ChangeLog and configure.ac for release 1.83 2017-12-17 07:37:19 +00:00
a60b32cb80 Merge pull request #699 from casidiablo/patch-1
Fix dbglevel usage
2017-12-17 15:55:24 +09:00
6b58220009 Merge pull request #697 from pwulff/master
Fixing race condition in FdEntity::GetStats
2017-12-17 15:46:48 +09:00
a841057679 Merge pull request #695 from orozery/fix_parallel_download_condition
fix condition for parallel download
2017-12-17 15:41:43 +09:00
ee6abea956 Race condition in FdManager::Rename because no mutex is used. 2017-12-15 15:27:51 +01:00
8b0acd75e0 Fix dbglevel usage 2017-12-14 14:37:18 -08:00
cea7d44717 Fixing race condition in FdEntity::GetStats 2017-12-13 10:49:00 +01:00
0da87e75fe fix condition for parallel download 2017-12-04 16:07:33 +02:00
566961c7a5 Merge pull request #692 from ggtakec/master
Updated template md files for issue and pr
2017-11-26 15:40:27 +09:00
ac65258d30 Updated template md files for issue and pr 2017-11-26 06:20:41 +00:00
35261e6dba Merge pull request #690 from ggtakec/master
Added option ecs description to man page
2017-11-23 21:40:11 +09:00
2818f23ba5 Added option ecs description to man page 2017-11-23 12:21:56 +00:00
88f071ea22 Merge pull request #669 from orozery/ibm_auth
add IBM IAM authentication support
2017-11-23 21:15:08 +09:00
bd4bc0e7f1 add support for IBM IAM authentication 2017-11-23 12:01:52 +02:00
890c1d53ff Merge pull request #688 from ggtakec/master
Improved use of temporary files - #678
2017-11-23 18:46:31 +09:00
026260e7a1 Improved use of temporary files 2017-11-23 09:18:11 +00:00
99fe93b7f1 Merge pull request #684 from gaul/signedness
Correct signedness warning
2017-11-23 17:33:46 +09:00
b764c53020 Merge pull request #686 from orozery/remove_jsoncpp
remove use of jsoncpp
2017-11-23 17:08:40 +09:00
11bd7128d2 remove use of jsoncpp 2017-11-22 13:36:27 +02:00
7cda32664b Correct signedness warning
Fixes regression from 0418e53b3c.
2017-11-19 11:14:37 -08:00
4c73a0ae56 Merge pull request #681 from ggtakec/master
Changed functions about reading passwd file
2017-11-19 21:06:45 +09:00
97fc845a6a Changed functions about reading passwd file. 2017-11-19 11:49:11 +00:00
7d9ac0163b Changed functions about reading passwd file. 2017-11-19 11:38:12 +00:00
d903e064e0 Merge pull request #677 from gaul/gitignore
Add s3proxy to .gitignore
2017-11-19 16:44:24 +09:00
e1928288fe Merge pull request #676 from gaul/sstream
Move str definition from header to implementation
2017-11-19 16:34:15 +09:00
6ab6412dd3 Merge pull request #679 from ggtakec/master
README.md Addition
2017-11-19 16:24:51 +09:00
30b7a69d3d README.md Addition 2017-11-19 07:03:39 +00:00
ccd0a446d8 Merge pull request #675 from gaul/define
Reduce use of preprocessor
2017-11-19 16:02:24 +09:00
0418e53b3c Reduce use of preprocessor
This provides type-safety and avoids token expansion side effects.
2017-11-18 22:40:06 -08:00
bad48ab59a Merge pull request #671 from psyvision/master
Add support for ECS metadata endpoint
2017-11-19 14:43:06 +09:00
bbad76bb71 Move str definition from header to implementation
Also clean up some char * to std::string conversions.
2017-11-18 11:34:34 -08:00
6c1bd98c14 Add s3proxy to .gitignore 2017-11-18 11:10:56 -08:00
b95e4acaeb Remove debug logging statements 2017-11-08 15:52:35 +00:00
c238701d09 Corrected ECS headers 2017-11-08 15:21:49 +00:00
60d2ac3c7a Adding x-amz-security-token header 2017-11-08 15:09:59 +00:00
967ef4d56b Corrected fat finger mistakes 2017-11-08 13:14:49 +00:00
ad57bdda6c Corrected keycount check 2017-11-08 13:06:22 +00:00
a0b69d1d3d Corrected keyval[].c_str() 2017-11-08 13:01:52 +00:00
5df94d7e33 Add debug messages 2017-11-08 09:50:39 +00:00
1cbe9fb7a3 Gotta pass that cppcheck 2017-11-07 21:41:51 +00:00
395f736753 Lower jsoncpp requirement 2017-11-07 21:38:01 +00:00
065516c5f3 Add jsoncpp to travis config 2017-11-07 21:29:07 +00:00
8660abaea2 Use jsoncpp to parse AWS JSON 2017-11-07 21:20:02 +00:00
366f0705a0 ECS credentials bug fixes 2017-11-06 21:45:58 +00:00
ccea87ca68 Added check for is_ecs during get_access_keys
We need to presume that if `is_ecs` is set, we are deferring access key loading
2017-11-06 11:02:27 +00:00
5d54883e2f Remove commented out code 2017-11-05 19:25:34 +00:00
662f65c3c8 Add support for ECS metadata endpoint 2017-11-05 19:24:02 +00:00
259f028490 Merge pull request #670 from ggtakec/master
Fixed a bug in S3fsCurl::LocateBundle
2017-11-05 20:37:12 +09:00
5db550a298 Fixed a bug in S3fsCurl::LocateBundle 2017-11-05 11:26:05 +00:00
e3c77d2906 Merge pull request #664 from orozery/auth_refactor
auth headers insertion refactoring
2017-11-05 15:14:40 +09:00
ba00e79253 Merge pull request #668 from ggtakec/master
Changed .travis.yml for fixing not found gpg2 on osx
2017-11-05 14:47:41 +09:00
c1791f920e Changed .travis.yml for fixing not found gpg2 on osx 2017-11-05 05:32:41 +00:00
df3803c7b7 Merge pull request #663 from gaul/orgmeta-lock
Lock FdEntity when mutating orgmeta
2017-11-05 13:53:14 +09:00
384b4cbafa auth headers insertion refactoring 2017-10-30 11:52:58 +02:00
40501a7a73 Lock FdEntity when mutating orgmeta
References #654.
2017-10-23 22:41:42 -07:00
ab89b4cd4a Merge pull request #659 from ggtakec/master
Do not fail updating directory when removing old-style object(ref #658)
2017-10-15 21:56:35 +09:00
48e0d55c8e Merge pull request #660 from s3fs-fuse/patch
Refixed s3fs_init message(ref #652)
2017-10-15 16:01:45 +09:00
1eba27a50a Refixed s3fs_init message(ref #652) 2017-10-15 06:45:19 +00:00
41206fa0e2 Do not fail updating directory when removing old-style object(ref #658) 2017-10-15 05:03:44 +00:00
21cf1d64e5 Merge pull request #652 from jurafxp/fix-error
Fix s3fs_init message
2017-10-15 12:36:12 +09:00
ae91b6f673 Fix s3fs_init message 2017-10-01 00:08:00 +02:00
f4515b5cfa Merge pull request #646 from andrewgaul/s3proxy-pid
Simplify S3Proxy PID handling
2017-09-20 05:17:00 +09:00
6c57cde7f9 Merge pull request #645 from andrewgaul/s3proxy-ssl
Configure S3Proxy for SSL
2017-09-19 21:39:32 +09:00
5014c1827b Simplify S3Proxy PID handling
Also remove log prefixing since newer S3Proxy versions do this
already.  Finally remove unnecessary wait.
2017-09-17 20:19:56 -07:00
f531e6aff2 Configure S3Proxy for SSL
This also demonstrates that SSL certificate checking occurs and the
tests must disable it for S3Proxy's self-signed certificate.
References #640.
2017-09-17 16:16:18 -07:00
c5c110137b Merge pull request #644 from ggtakec/master
Fixed with unnecessary equal in POST uploads url argument - #643
2017-09-17 20:08:53 +09:00
5957d9ead0 Fixed with unnecessary equal in POST uploads url argument - #643 2017-09-17 10:52:28 +00:00
5675df2a44 Merge pull request #642 from ggtakec/master
Fixed potential atomic violation in S3fsCurl::AddUserAgent - #633
2017-09-17 18:40:34 +09:00
00bc9142c4 Fixed potential atomic violation in S3fsCurl::AddUserAgent - #633 2017-09-17 09:16:05 +00:00
5653ab39fc Merge pull request #639 from andrewgaul/homebrew
Update Homebrew instructions
2017-09-17 15:43:03 +09:00
473dd7c940 Merge pull request #637 from andrewgaul/non-aws-s3
Add blurb about non-Amazon S3 implementations
2017-09-17 15:37:13 +09:00
ee824d52ba Merge pull request #638 from andrewgaul/readme-fixes
Minor fixes to README
2017-09-17 15:31:09 +09:00
7c5fba9890 Merge pull request #636 from swt2c/macos_upload_failures
Fix intermittent upload failures on macOS
2017-09-17 15:23:48 +09:00
f214cb03b2 Update Homebrew instructions
These moved to homebrew-core:

https://github.com/Homebrew/homebrew-core/pull/11283
2017-09-12 18:58:19 -07:00
416c51799b Minor fixes to README 2017-09-12 18:44:27 -07:00
cf6f665f03 Add blurb about non-Amazon S3 implementations
References #629.
2017-09-12 18:09:56 -07:00
20da0e4dd3 Fix intermittent upload failures on macOS
There were multiple problems with the FdManager::GetFreeDiskSpace() function
on macOS:
1) When calling statvfs(), f_frsize should be used instead of f_bsize when
converting available blocks to bytes.  This was causing the free space
calculation to be incorrect.
2) On macOS, fsblkcnt_t is a 32-bit integer.  Thus, when calculating available
disk space, there were frequently overflows.  This caused s3fs to incorrectly
determine that the cache location was out of space in the middle of a transfer
which caused uploads to fail.  Changing this to a uint64_t resolves the
problem.
2017-09-08 15:23:10 -04:00
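A sketch of the corrected calculation (function name assumed):
```
#include <stdint.h>
#include <sys/statvfs.h>

uint64_t free_disk_space(const char* path) {
    struct statvfs vfs;
    if (statvfs(path, &vfs) != 0) {
        return 0;
    }
    // use f_frsize, not f_bsize, and widen both factors so the product
    // cannot overflow a 32-bit fsblkcnt_t
    return static_cast<uint64_t>(vfs.f_bavail) * static_cast<uint64_t>(vfs.f_frsize);
}
```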
fa8c417526 Merge pull request #631 from s3fs-fuse/macosx
Merged macosx branch into master branch #601
2017-08-12 00:13:10 +09:00
2c65aec6c8 Merge pull request #630 from ggtakec/macosx
Added travis test on osx for #601
2017-08-11 23:46:51 +09:00
96d8e6d823 Merge remote-tracking branch 'upstream/macosx' into macosx 2017-08-11 14:20:12 +00:00
62b8084300 Added travis test on osx for #601 2017-08-11 14:09:43 +00:00
907aff5de4 Merge pull request #627 from swordsreversed/master
Update README.md
2017-08-02 22:14:08 +09:00
bc09129ec5 Update README.md 2017-08-01 10:20:46 +10:00
cd94f638e2 Update README.md
Add fuse as a dependency!
2017-07-31 21:23:08 +10:00
b1fe419870 Merge pull request #621 from andrewgaul/s3proxy
Upgrade to S3Proxy 1.5.3
2017-07-10 21:13:26 +09:00
98b724391f Upgrade to S3Proxy 1.5.3
Release notes:

https://github.com/andrewgaul/s3proxy/releases/tag/s3proxy-1.5.3
2017-07-09 22:41:39 -07:00
620f6ec616 Merge pull request #611 from ggtakec/master
Fixed clock_gettime build failure on macOS 10.12 Sierra - #600
2017-05-28 19:15:45 +09:00
0c6a3882a2 Fixed clock_gettime build failure on macOS 10.12 Sierra - #600 2017-05-28 10:04:25 +00:00
a08880ae15 Merge pull request #608 from tlevi/chown_nocopy
Fix chown_nocopy losing existing uid/gid if unspecified
2017-05-28 18:36:08 +09:00
f48826dfe9 Merge pull request #609 from tlevi/getgrgid_r
Group permission checks sometimes fail with large number of groups
2017-05-27 11:02:18 +09:00
9c3551478e Merge pull request #606 from andrewgaul/homebrew
Add Homebrew instructions
2017-05-27 11:01:01 +09:00
cc94e1da26 Fix chown_nocopy losing existing uid/gid if unspecified 2017-05-25 16:53:08 +09:30
2b7ea5813c Expand buffer for group information if too small and retry 2017-05-23 10:42:43 +09:30
185192be67 Add Homebrew instructions 2017-05-22 10:12:22 -07:00
ae4caa96a0 Merge pull request #598 from ggtakec/master
Updated ChangeLog and configure.ac for release 1.82
2017-05-14 01:25:34 +09:00
af13ae82c1 Updated ChangeLog and configure.ac for release 1.82 2017-05-13 16:12:46 +00:00
13503c063b Merge pull request #597 from ggtakec/master
Not fallback to HTTP - #596
2017-05-14 01:01:36 +09:00
337da59368 Not fallback to HTTP - #596 2017-05-13 15:47:39 +00:00
51 changed files with 8649 additions and 3312 deletions

.clang-tidy Normal file

@@ -0,0 +1,33 @@
Checks: '
-*,
bugprone-*,
-bugprone-branch-clone,
-bugprone-macro-parentheses,
google-*,
-google-build-using-namespace,
-google-readability-casting,
-google-readability-function-size,
-google-readability-todo,
-google-runtime-int,
-google-runtime-references,
misc-*,
-misc-redundant-expression,
-misc-unused-parameters,
modernize-*,
-modernize-avoid-c-arrays,
-modernize-deprecated-headers,
-modernize-loop-convert,
-modernize-use-auto,
-modernize-use-nullptr,
-modernize-use-trailing-return-type,
-modernize-use-using,
performance-*,
portability-*,
readability-*,
-readability-else-after-return,
-readability-function-size,
-readability-implicit-bool-conversion,
-readability-isolate-declaration,
-readability-magic-numbers,
-readability-named-parameter,
-readability-simplify-boolean-expr'

.gitattributes vendored Normal file

@@ -0,0 +1,32 @@
#
# s3fs - FUSE-based file system backed by Amazon S3
#
# Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
* text eol=lf
*.png binary
#
# Local variables:
# tab-width: 4
# c-basic-offset: 4
# End:
# vim600: noet sw=4 ts=4 fdm=marker
# vim<600: noet sw=4 ts=4
#

.github/ISSUE_TEMPLATE.md

@@ -1,27 +1,28 @@
#### Additional Information
### Additional Information
_The following information is very important in order to help us to help you. Omission of the following details may delay your support request, or it may receive no attention at all._
_Keep in mind that the commands we provide to retrieve information are oriented to GNU/Linux distributions, so you may need to use other commands if you use s3fs on macOS or BSD._
- Version of s3fs being used (s3fs --version)
- _example: 1.0_
#### Version of s3fs being used (s3fs --version)
_example: 1.00_
- Version of fuse being used (pkg-config --modversion fuse)
- _example: 2.9.4_
#### Version of fuse being used (pkg-config --modversion fuse, rpm -qi fuse, dpkg -s fuse)
_example: 2.9.4_
- System information (uname -a)
- _command result: uname -a_
#### Kernel information (uname -r)
_command result: uname -r_
- Distro (cat /etc/issue)
- _command result: result_
#### GNU/Linux Distribution, if applicable (cat /etc/os-release)
_command result: cat /etc/os-release_
- s3fs command line used (if applicable)
#### s3fs command line used, if applicable
```
```
- /etc/fstab entry (if applicable):
#### /etc/fstab entry, if applicable
```
```
- s3fs syslog messages (grep s3fs /var/log/syslog, or s3fs outputs)
#### s3fs syslog messages (grep s3fs /var/log/syslog, journalctl | grep s3fs, or s3fs outputs)
_if you execute s3fs with dbglevel, curldbg option, you can get detail debug messages_
```
```
#### Details about issue
### Details about issue

.github/PULL_REQUEST_TEMPLATE.md

@@ -1,5 +1,5 @@
#### Relevant Issue (if applicable)
### Relevant Issue (if applicable)
_If there are Issues related to this PullRequest, please list it._
#### Details
### Details
_Please describe the details of PullRequest._

.gitignore vendored

@@ -1,31 +1,86 @@
#
# s3fs - FUSE-based file system backed by Amazon S3
#
# Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# Compiled Object files
#
*.slo
*.lo
*.o
/Makefile
/Makefile.in
/aclocal.m4
/autom4te.cache/
/config.guess
/config.log
/config.status
/config.sub
/stamp-h1
/config.h
/config.h.in
/config.h.in~
/configure
/depcomp
/test-driver
/compile
/doc/Makefile
/doc/Makefile.in
/install-sh
/missing
/src/.deps/
/src/Makefile
/src/Makefile.in
/src/s3fs
/src/test_*
/test/.deps/
/test/Makefile
/test/Makefile.in
/test/*.log
/default_commit_hash
*.Po
*.Plo
#
# autotools/automake
#
aclocal.m4
autom4te.cache
autoscan.log
config.guess
config.h
config.h.in
config.h.in~
config.log
config.status
config.sub
configure
configure.scan
depcomp
install-sh
libtool
ltmain.sh
m4
m4/*
missing
stamp-h1
Makefile
Makefile.in
test-driver
compile
missing
#
# object directories
#
.deps
.libs
*/.deps
*/.deps/*
*/.libs
*/.libs/*
#
# each directories
#
*.log
*.trs
default_commit_hash
src/s3fs
src/test_*
test/s3proxy-*
#
# Local variables:
# tab-width: 4
# c-basic-offset: 4
# End:
# vim600: noet sw=4 ts=4 fdm=marker
# vim<600: noet sw=4 ts=4
#

.travis.yml

@@ -1,17 +1,150 @@
#
# s3fs - FUSE-based file system backed by Amazon S3
#
# Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
language: cpp
sudo: required
dist: trusty
cache: apt
before_install:
- sudo apt-get update -qq
- sudo apt-get install -qq cppcheck libfuse-dev openjdk-7-jdk
- sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
script:
- ./autogen.sh
- ./configure
- make
- make cppcheck
- make check -C src
- modprobe fuse
- make check -C test
- cat test/test-suite.log
dist: xenial
os: linux
jobs:
include:
- os: linux
dist: trusty
cache: apt
before_install:
- sudo apt-get update -qq
- sudo apt-get install -qq attr cppcheck libfuse-dev openjdk-7-jdk
- sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
- sudo -H pip install --upgrade awscli
script:
- ./autogen.sh
- ./configure CPPFLAGS='-I/usr/local/opt/openssl/include' CXXFLAGS='-std=c++03 -DS3FS_PTHREAD_ERRORCHECK=1'
- make
- make cppcheck
- make check -C src
- modprobe fuse
- travis_wait 30 make check -C test
- test/filter-suite-log.sh test/test-suite.log
- os: osx
osx_image: xcode9.2
cache:
directories:
- $HOME/Library/Caches/Homebrew
- /usr/local/Homebrew
- $HOME/.osx_cache
before_cache:
- brew cleanup
- cd /usr/local/Homebrew; find . \! -regex ".+\.git.+" -delete
- mkdir -p $HOME/.osx_cache; touch $HOME/.osx_cache/cached
before_install:
- TAPS="$(brew --repository)/Library/Taps";
if [ -e "$TAPS/caskroom/homebrew-cask" ]; then
rm -rf "$TAPS/caskroom/homebrew-cask";
fi;
if [ ! -f $HOME/.osx_cache/cached ]; then
echo "==> [Not found cache] brew tap homebrew/homebrew-cask";
echo "[NOTE]";
echo "If brew is executed without HOMEBREW_NO_AUTO_UPDATE=1,";
echo "python3 cannot be installed, so this is added as a temporary workaround.";
echo "If it is xcode 9.4 or higher, clear this patch.";
HOMEBREW_NO_AUTO_UPDATE=1 brew tap homebrew/homebrew-cask;
else
echo "==> [Found cache] HOMEBREW_NO_AUTO_UPDATE=1 brew tap homebrew/homebrew-cask";
HOMEBREW_NO_AUTO_UPDATE=1 brew tap homebrew/homebrew-cask;
fi
- HOMEBREW_NO_AUTO_UPDATE=1 brew cask install osxfuse
- S3FS_BREW_PACKAGES='cppcheck python3';
for s3fs_brew_pkg in ${S3FS_BREW_PACKAGES}; do
if brew list | grep -q ${s3fs_brew_pkg}; then
if brew outdated | grep -q ${s3fs_brew_pkg}; then
echo "==> Try to upgrade ${s3fs_brew_pkg}";
HOMEBREW_NO_AUTO_UPDATE=1 brew upgrade ${s3fs_brew_pkg};
fi
else
echo "==> Try to install ${s3fs_brew_pkg}";
HOMEBREW_NO_AUTO_UPDATE=1 brew install ${s3fs_brew_pkg};
fi;
done
- if pip3 --version; then
echo "==> Try to install awscli by pip3";
sudo -H pip3 install awscli;
else
echo "==> Try to install awscli by pip";
curl https://bootstrap.pypa.io/get-pip.py | sudo python;
sudo -H pip install awscli --ignore-installed matplotlib;
fi
- if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then
sudo chmod +s /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs;
elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then
sudo chmod +s /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse;
else
exit 1;
fi
- if [ ! -f /usr/local/bin/truncate ]; then
echo "==> Make symbolic link truncate to gtruncate";
sudo ln -s /usr/local/opt/coreutils/bin/gtruncate /usr/local/bin/truncate;
fi
- if [ ! -f /usr/local/bin/stdbuf ]; then
echo "==> Make symbolic link stdbuf to gstdbuf";
sudo ln -s /usr/local/opt/coreutils/bin/gstdbuf /usr/local/bin/stdbuf;
fi
script:
- ./autogen.sh
- PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig ./configure CXXFLAGS='-std=c++03 -DS3FS_PTHREAD_ERRORCHECK=1'
- make
- make cppcheck
- make check -C src
- if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then
/Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs;
elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then
/Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse;
else
exit 1;
fi
- travis_wait 30 make check -C test
- test/filter-suite-log.sh test/test-suite.log
- os: linux-ppc64le
dist: trusty
cache: apt
before_install:
- sudo add-apt-repository -y ppa:openjdk-r/ppa
- sudo apt-get update -qq
- sudo apt-get install -qq attr cppcheck libfuse-dev openjdk-7-jdk
- sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-ppc64el/jre/bin/java
- sudo -H pip install --upgrade awscli
script:
- ./autogen.sh
- ./configure CPPFLAGS='-I/usr/local/opt/openssl/include' CXXFLAGS='-std=c++03 -DS3FS_PTHREAD_ERRORCHECK=1'
- make
- make cppcheck
- make check -C src
- modprobe fuse
- travis_wait 30 make check -C test
- test/filter-suite-log.sh test/test-suite.log
#
# Local variables:
# tab-width: 4
# c-basic-offset: 4
# End:
# vim600: noet sw=4 ts=4 fdm=marker
# vim<600: noet sw=4 ts=4
#

AUTHORS

@@ -17,3 +17,7 @@ Bugfixes, performance and other improvements.
5. Takeshi Nakatani <ggtakec@gmail.com>
Bugfixes, performance and other improvements.
6. Andrew Gaul <gaul@gaul.org>
Bugfixes, performance and other improvements.

COMPILATION.md Normal file

@@ -0,0 +1,34 @@
# Compilation from source code
These are generic instructions that should work on almost any GNU/Linux, macOS, BSD, or similar system.
If you want specific instructions for some distributions, check the [wiki](https://github.com/s3fs-fuse/s3fs-fuse/wiki/Installation-Notes).
Consider using the pre-built packages when available.
1. Ensure your system satisfies build and runtime dependencies for:
* fuse >= 2.8.4
* automake
* gcc-c++
* make
* libcurl
* libxml2
* openssl
* mime.types (the package providing it depends on the OS)
  * s3fs tries to detect `/etc/mime.types` by default, regardless of the OS
  * Otherwise, s3fs tries to detect `/etc/apache2/mime.types` if the OS is macOS
  * s3fs exits with an error if neither file exists
  * Alternatively, you can set the mime.types file path with the `mime` option, bypassing detection of these default files
* pkg-config (or your OS equivalent)
2. Then compile from master via the following commands:
```
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
cd s3fs-fuse
./autogen.sh
./configure
make
sudo make install
```

ChangeLog

@@ -1,6 +1,210 @@
ChangeLog for S3FS
------------------
Version 1.87 -- 10 Aug, 2020 (major changes only)
#1244 - use correct content-type when complete multipart upload
#1265 - Fixed a bug of stats cache compression
#1271 - Fixed the truncation bug of stat file for cache file
#1274 - Improved strictness of cache file stats(file)
#1277 - Fixed insufficient upload size for mix multipart upload
#1282 - Warn about missing MIME types instead of exiting
#1285 - Not abort process by exception threw from s3fs_strtoofft
#1286 - Support Google Cloud Storage headers
#1295 - Added a parameter to output body to curldbg option
#1302 - Fix renames of open files with nocopyapi option
#1303 - Relink cache stats file atomically via rename
#1305 - Ignore case when comparing ETags
#1306 - Retry with exponential backoff during 500 error
#1312 - Fixed a bug about serializing from cache file
#1313 - Fixed about ParallelMixMultipartUpload
#1316 - Add support for glacier storage class
#1319 - Fixed upload error about mixuploading sparse file and truncating file
#1334 - Added SIGUSR1 option for cache file integrity test
#1341 - Change default stat_cache_expire
Version 1.86 -- 04 Feb, 2020 (major changes only)
#965 - enable various optimizations when using modern curl
#1002 - allow SSE-C keys to have NUL bytes
#1008 - add session token support
#1039 - allow large files on 32-bit systems like Raspberry Pi
#1049 - fix data corruption when external modification changes a cached object
#1063 - fix data corruption when opening a second fd to an unflushed file
#1066 - fix clock skew errors when writing large files
#1081 - allow concurrent metadata queries during data operations
#1098 - use server-side copy for partially modified files
#1107 - #1108 - fix multiple concurrency issues
#1199 - add requester_pays support
#1209 - add symlink cache
#1224 - add intelligent_ia storage tier
Version 1.85 -- 11 Mar, 2019
#804 - add Backblaze B2
#812 - Fix typo s/mutliple/multiple/
#819 - #691: Made instructions for creating password file more obvious.
#820 - Enable big writes if capable
#826 - For RPM distributions fuse-libs is enough
#831 - Add support for storage class ONEZONE_IA.
#832 - Simplify hex conversion
#833 - New installation instructions for Fedora >= 27 and CentOS7
#834 - Improve template for issues
#835 - Make the compilation instructions generic
#840 - Replace all mentions to MacOS X to macOS
#849 - Correct typo
#851 - Correctly compare list_object_max_keys
#852 - Allow credentials from ${HOME}/.aws/credentials
#853 - Replace ~ with ${HOME} in examples
#855 - Include StackOverflow in FAQs
#856 - Add icon for s3fs
#859 - Upload S3 parts without batching
#861 - Add 'profile' option to command line help.
#865 - fix multihead warning check
#866 - Multi-arch support for ppc64le
#870 - Correct typos in command-line parsing
#874 - Address cppcheck 1.86 errors
#877 - Check arguments and environment before .aws/creds
#882 - [curl] Assume long encryption keys are base64 encoded
#885 - Update s3fs_util.cpp for correspondence of Nextcloud contype
#888 - Add Server Fault to FAQs
#892 - Repair xattr tests
#893 - Store and retrieve file change time
#894 - Default uid/gid/mode when object lacks permissions
#895 - Emit more friendly error for buckets with dots
#898 - Flush file before renaming
#899 - Tighten up HTTP response code check
#900 - Plug memory leak
#901 - Plug memory leaks
#902 - Avoid pass-by-value when not necessary
#903 - Prefer find(char) over find(const char *)
#904 - Remove unnecessary calls to std::string::c_str
#905 - Fix comparison in s3fs_strtoofft
#906 - Prefer HTTPS links where possible
#908 - Added an error message when HTTP 301 status
#909 - Ignore after period character of floating point in x-amz-meta-mtime
#910 - Added a missing extension to .gitignore, and formatted dot files
#911 - Added detail error message when HTTP 301/307 status
#912 - Automatic region change made possible other than us-east-1(default)
#913 - Prefer abort over assert(false)
#914 - Issue readdir HEAD requests without batching
#917 - Reference better-known AWS CLI for compatibility
#918 - Load tail range during overwrite
#919 - Add test for mv non-empty directory
#920 - Remove unnecessary string copies
#921 - Remove redundant string initializations
#923 - Reverted automatic region change and changed messages
#924 - Prefer empty over size checks
#925 - Remove redundant null checks before delete
#926 - Accept paths with : in them
#930 - Correct enable_content_md5 docs
#931 - Correct sigv2 typo
#932 - Prefer AutoLock for synchronization
#933 - Remove mirror path when deleting cache
#934 - Checked and corrected all typo
#937 - Disable malloc_trim
#938 - Remove unneeded void parameter
#939 - Prefer specific [io]stringstream where possible
#940 - Copy parts in parallel
#942 - Ensure s3fs compiles with C++03
#943 - Return not supported when hard linking
#944 - Repair utility mode
#946 - Simplify async request completion code
#948 - Add logging for too many parts
#949 - Implement exponential backoff for 503
#950 - Added S3FS_MALLOC_TRIM build switch
#951 - Added a non-interactive option to utility mode
#952 - Automatically abort failed multipart requests
#953 - Update s3ql link
#954 - Clear containers instead of individual erases
#955 - Address miscellaneous clang-tidy warnings
#957 - Upgrade to S3Proxy 1.6.1
#958 - Document lack of inotify support
#959 - Fixed code for latest cppcheck error on OSX
#960 - Wtf8
#961 - Work around cppcheck warnings
#965 - Improvement of curl session pool for multipart
#967 - Increase FdEntity reference count when returning
#969 - Fix lazy typo
#970 - Remove from file from stat cache during rename
#972 - Add instructions for Amazon Linux
#974 - Changed the description order of man page options
#975 - Fixed ref-count when error occurred.
#977 - Make macOS instructions consistent with others
Version 1.84 -- Jul 8, 2018
#704 - Update README.md with details about .passwd-s3fs
#710 - add disk space reservation
#712 - Added Cygwin build options
#714 - reduce lock contention on file open
#724 - don't fail multirequest on single thread error
#726 - add an instance_name option for logging
#727 - Fixed Travis CI error about cppcheck - #713
#729 - FreeBSD build fixes
#733 - More useful error message for dupe entries in passwd file
#739 - cleanup curl handle state on retries
#745 - don't fail mkdir when directory exists
#753 - fix xpath selector in bucket listing
#754 - Validate the URL format for http/https
#755 - Added reset curl handle when returning to handle pool
#756 - Optimize defaults
#761 - Simplify installation for Ubuntu 16.04
#762 - Upgrade to S3Proxy 1.6.0
#763 - cleanup curl handles before curl share
#764 - Remove false multihead warnings
#765 - Add Debian installation instructions
#766 - Remove s3fs-python
#768 - Fixed memory leak
#769 - Revert "enable FUSE read_sync by default"
#774 - Option for IAM authentication endpoint
#780 - gnutls_auth: initialize libgcrypt
#781 - Fixed an error by cppcheck on OSX
#786 - Log messages for 5xx and 4xx HTTP response code
#789 - Instructions for SUSE and openSUSE prebuilt packages
#793 - Added list_object_max_keys option based on #783 PR
Version 1.83 -- Dec 17, 2017
#606 - Add Homebrew instructions
#608 - Fix chown_nocopy losing existing uid/gid if unspecified
#609 - Group permission checks sometimes fail with large number of groups
#611 - Fixed clock_gettime build failure on macOS 10.12 Sierra - #600
#621 - Upgrade to S3Proxy 1.5.3
#627 - Update README.md
#630 - Added travis test on osx for #601
#631 - Merged macosx branch into master branch #601
#636 - Fix intermittent upload failures on macOS
#637 - Add blurb about non-Amazon S3 implementations
#638 - Minor fixes to README
#639 - Update Homebrew instructions
#642 - Fixed potential atomic violation in S3fsCurl::AddUserAgent - #633
#644 - Fixed with unnecessary equal in POST uploads url argument - #643
#645 - Configure S3Proxy for SSL
#646 - Simplify S3Proxy PID handling
#652 - Fix s3fs_init message
#659 - Do not fail updating directory when removing old-style object(ref #658)
#660 - Refixed s3fs_init message(ref #652)
#663 - Lock FdEntity when mutating orgmeta
#664 - auth headers insertion refactoring
#668 - Changed .travis.yml for fixing not found gpg2 on osx
#669 - add IBM IAM authentication support
#670 - Fixed a bug in S3fsCurl::LocateBundle
#671 - Add support for ECS metadata endpoint
#675 - Reduce use of preprocessor
#676 - Move str definition from header to implementation
#677 - Add s3proxy to .gitignore
#679 - README.md Addition
#681 - Changed functions about reading passwd file
#684 - Correct signedness warning
#686 - remove use of jsoncpp
#688 - Improved use of temporary files - #678
#690 - Added option ecs description to man page
#692 - Updated template md files for issue and pr
#695 - fix condition for parallel download
#697 - Fixing race condition in FdEntity::GetStats
#699 - Fix dbglevel usage
Version 1.82 -- May 13, 2017
#597 - Not fallback to HTTP - #596
#598 - Updated ChangeLog and configure.ac for release 1.82
Version 1.81 -- May 13, 2017
#426 - Updated to correct ChangeLog
#431 - fix typo s/controll/control/
@@ -37,7 +241,7 @@ Version 1.81 -- May 13, 2017
#540 - Address cppcheck 1.77 warnings
#545 - Changed base cached time of stat_cache_expire option - #523
#546 - Fixed double initialization of SSL library at foreground
#550 - Add umount instruction for unplivileged user
#550 - Add umount instruction for unprivileged user
#551 - Updated stat_cache_expire option description - #545
#552 - switch S3fsMultiCurl to use foreground threads
#553 - add TLS cipher suites customization
@@ -60,6 +264,7 @@ Version 1.81 -- May 13, 2017
#590 - Updated man page for default_acl option - #567
#593 - Backward compatible for changing default transport to HTTPS
#594 - Check bucket at public bucket and add nocopyapi option automatically
#595 - Updated ChangeLog and configure.ac for release 1.81
Version 1.80 -- May 29, 2016
#213 - Parse ETag from copy multipart correctly
@@ -89,7 +294,7 @@ Version 1.80 -- May 29, 2016
#250 - s3fs can print version with short commit hash - #228
#251 - Skip xattr tests if utilities are missing
#252 - This fixes an issue with caching when the creation of a subdirectory …
#253 - Added chacking cache dir perms at starting.
#253 - Added checking cache dir perms at starting.
#256 - Add no atomic rename to limitations
#257 - Update README.md: Bugfix password file permissions errors
#258 - Update README.md to better explain mount upon boot
@@ -117,7 +322,7 @@ Version 1.80 -- May 29, 2016
#306 - Fix read concurrency to work in parallel count
#307 - Fix pthread portability problem
#308 - Changed ensure free disk space as additional change for #306
#309 - Check pthread prtability in configure as additional change for #307
#309 - Check pthread portability in configure as additional change for #307
#310 - Update integration-test-main.sh as additional change for #300
#311 - Change error log to debug log in s3fs_read()
#313 - fix gitignore
@@ -129,14 +334,14 @@ Version 1.80 -- May 29, 2016
#330 - Pass by const reference where possible
#331 - Address various clang warnings
#334 - Bucket host should include port and not path
#336 - update REAME.md for fstab
#336 - update README.md for fstab
#338 - Fixed a bug about IAMCRED type could not be retried.
#339 - Updated README.md for fstab example.
#341 - Fix the memory leak issue in fdcache.
#346 - Fix empty directory check against AWS S3
#348 - Integration test summary, continue on error
#350 - Changed cache out logic for stat - #340
#351 - Check cache dirctory path and attributes - #347
#351 - Check cache directory path and attributes - #347
#352 - Remove stat file cache dir if specified del_cache - #337
#354 - Supported regex type for additional header format - #343
#355 - Fixed codes about clock_gettime for osx
@@ -213,7 +418,7 @@ issue #184 - Add usage information for multipart_size
issue #185 - Correct obvious typos in usage and README
issue #190 - Add a no_check_certificate option.
issue #194 - Tilda in a file-name breaks things (EPERM)
issue #198 - Disasble integration tests for Travis
issue #198 - Disable integration tests for Travis
issue #199 - Supported extended attributes(retry)
issue #200 - fixed fallback to sigv2 for bucket create and GCS
issue #202 - Specialize {set,get}xattr for OS X
@@ -250,97 +455,97 @@ issue #4 - Fix compilation error on MacOSX with missing const
Version 1.74 -- Nov 24, 2013
This version is initial version on Github, same as on GoogleCodes(s3fs).
https://github.com/s3fs-fuse/s3fs-fuse/releases/tag/v1.74
see more detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.74.tar.gz
see more detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.74.tar.gz
Version 1.73 -- Aug 23, 2013
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.73.tar.gz
Version 1.72 -- Aug 10, 2013
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.72.tar.gz
Version 1.71 -- Jun 15, 2013
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.71.tar.gz
Version 1.70 -- Jun 01, 2013
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.70.tar.gz
Version 1.69 -- May 15, 2013
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.69.tar.gz
Version 1.68 -- Apr 30, 2013
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.68.tar.gz
Version 1.67 -- Apr 13, 2013
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.67.tar.gz
Version 1.66 -- Apr 06, 2013
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.66.tar.gz
Version 1.65 -- Mar 30, 2013
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.65.tar.gz
Version 1.64 -- Mar 23, 2013
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.64.tar.gz
Version 1.63 -- Feb 24, 2013
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.63.tar.gz
Version 1.62 -- Jan 27, 2013
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.62.tar.gz
Version 1.61 -- Aug 30, 2011
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.61.tar.gz
Version 1.60 -- Aug 29, 2011
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.60.tar.gz
Version 1.59 -- Jul 28, 2011
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.59.tar.gz
Version 1.58 -- Jul 19, 2011
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.58.tar.gz
Version 1.57 -- Jul 07, 2011
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.57.tar.gz
Version 1.56 -- Jul 07, 2011
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.56.tar.gz
Version 1.55 -- Jul 02, 2011
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.55.tar.gz
Version 1.54 -- Jun 25, 2011
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.54.tar.gz
Version 1.53 -- Jun 22, 2011
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.53.tar.gz
Version 1.40 -- Feb 11, 2011
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.40.tar.gz
Version 1.33 -- Dec 30, 2010
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.33.tar.gz
Version 1.25 -- Dec 16, 2010
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.25.tar.gz
Version 1.19 -- Dec 2, 2010
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.19.tar.gz
Version 1.16 -- Nov 22, 2010
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.16.tar.gz
Version 1.10 -- Nov 6, 2010
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.10.tar.gz
Version 1.02 -- Oct 29, 2010
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.02.tar.gz
Version 1.01 -- Oct 28, 2010
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.01.tar.gz
Version 1.0 -- Oct 24, 2010
see details on Google Code: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.0.tar.gz
------
Version 1.1 -- Mon Oct 18 2010

View File

@ -124,7 +124,7 @@ architecture at a time in the source code directory. After you have
installed the package for one architecture, use `make distclean' before
reconfiguring for another architecture.
On MacOS X 10.5 and later systems, you can create libraries and
On macOS 10.5 and later systems, you can create libraries and
executables that work on multiple system types--known as "fat" or
"universal" binaries--by specifying multiple `-arch' options to the
compiler but only a single `-arch' option to the preprocessor. Like

View File

@ -32,10 +32,14 @@ cppcheck:
cppcheck --quiet --error-exitcode=1 \
--inline-suppr \
--std=c++03 \
--xml \
-D HAVE_ATTR_XATTR_H \
-D HAVE_SYS_EXTATTR_H \
-D HAVE_MALLOC_TRIM \
-U CURLE_PEER_FAILED_VERIFICATION \
-U P_tmpdir \
--enable=all \
-U ENOATTR \
--enable=warning,style,information,missingInclude \
--suppress=missingIncludeSystem \
--suppress=unusedFunction \
--suppress=variableScope \
--suppress=unmatchedSuppression \
src/ test/
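For reference, the target can be run locally from the repository root (assuming a cppcheck binary is on the PATH; the target exits non-zero on any finding):
```
make cppcheck
```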

README.md
View File

@ -1,15 +1,16 @@
s3fs
====
# s3fs
s3fs allows Linux and Mac OS X to mount an S3 bucket via FUSE.
s3fs preserves the native object format for files, allowing use of other tools like [s3cmd](http://s3tools.org/s3cmd).
s3fs allows Linux and macOS to mount an S3 bucket via FUSE.
s3fs preserves the native object format for files, allowing use of other
tools like [AWS CLI](https://github.com/aws/aws-cli).
[![Build Status](https://travis-ci.org/s3fs-fuse/s3fs-fuse.svg?branch=master)](https://travis-ci.org/s3fs-fuse/s3fs-fuse)
[![Twitter Follow](https://img.shields.io/twitter/follow/s3fsfuse.svg?style=social&label=Follow)](https://twitter.com/s3fsfuse)
Features
--------
## Features
* large subset of POSIX including reading/writing files, directories, symlinks, mode, uid/gid, and extended attributes
* compatible with Amazon S3, Google Cloud Storage, and other S3-based object stores
* allows random writes and appends
* large files via multi-part upload
* renames via server-side copy
* optional server-side encryption
@ -19,110 +20,156 @@ Features
* user-specified regions, including Amazon GovCloud
* authenticate via v2 or v4 signatures
Installation
------------
## Installation
Ensure you have all the dependencies:
Many systems provide pre-built packages:
On Ubuntu 14.04:
* Amazon Linux via EPEL:
```
sudo amazon-linux-extras install epel
sudo yum install s3fs-fuse
```
* Arch Linux:
```
sudo pacman -S s3fs-fuse
```
* Debian 9 and Ubuntu 16.04 or newer:
```
sudo apt install s3fs
```
* Fedora 27 or newer:
```
sudo dnf install s3fs-fuse
```
* Gentoo:
```
sudo emerge net-fs/s3fs
```
* RHEL and CentOS 7 or newer via EPEL:
```
sudo yum install epel-release
sudo yum install s3fs-fuse
```
* SUSE 12 and openSUSE 42.1 or newer:
```
sudo zypper install s3fs
```
* macOS via [Homebrew](https://brew.sh/):
```
brew cask install osxfuse
brew install s3fs
```
Otherwise consult the [compilation instructions](COMPILATION.md).
## Examples
s3fs supports the standard
[AWS credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-config-files.html)
stored in `${HOME}/.aws/credentials`. Alternatively, s3fs supports a custom passwd file.
The s3fs password file can be created in either of two default locations:
* a `.passwd-s3fs` file in the user's home directory (i.e. `${HOME}/.passwd-s3fs`)
* the system-wide `/etc/passwd-s3fs` file
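For illustration, a minimal `${HOME}/.aws/credentials` file might look like this (the key values are placeholders):
```
cat > ${HOME}/.aws/credentials <<'EOF'
[default]
aws_access_key_id = YOUR_ACCESS_KEY_ID
aws_secret_access_key = YOUR_SECRET_ACCESS_KEY
EOF
chmod 600 ${HOME}/.aws/credentials
```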
Enter your credentials in a file `${HOME}/.passwd-s3fs` and set
owner-only permissions:
```
sudo apt-get install automake autotools-dev g++ git libcurl4-gnutls-dev libfuse-dev libssl-dev libxml2-dev make pkg-config
```
On CentOS 7:
```
sudo yum install automake fuse fuse-devel gcc-c++ git libcurl-devel libxml2-devel make openssl-devel
```
Compile from master via the following commands:
```
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
cd s3fs-fuse
./autogen.sh
./configure
make
sudo make install
```
Examples
--------
Enter your S3 identity and credential in a file `/path/to/passwd`:
```
echo MYIDENTITY:MYCREDENTIAL > /path/to/passwd
```
Make sure the file has proper permissions (if you get 'permissions' error when mounting) `/path/to/passwd`:
```
chmod 600 /path/to/passwd
echo ACCESS_KEY_ID:SECRET_ACCESS_KEY > ${HOME}/.passwd-s3fs
chmod 600 ${HOME}/.passwd-s3fs
```
Run s3fs with an existing bucket `mybucket` and directory `/path/to/mountpoint`:
```
s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd
s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs
```
If you encounter any errors, enable debug output:
```
s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd -d -d -f -o f2 -o curldbg
s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs -o dbglevel=info -f -o curldbg
```
You can also mount on boot by entering the following line to `/etc/fstab`:
```
s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other 0 0
mybucket /path/to/mountpoint fuse.s3fs _netdev,allow_other 0 0
```
If you use s3fs with a non-Amazon S3 implementation, specify the URL and path-style requests:
```
s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs -o url=https://url.to.s3/ -o use_path_request_style
```
or (fstab)
```
mybucket /path/to/mountpoint fuse.s3fs _netdev,allow_other,use_path_request_style,url=https://url.to.s3/ 0 0
```
Note: You may also want to create the global credential file first
```
echo MYIDENTITY:MYCREDENTIAL > /etc/passwd-s3fs
echo ACCESS_KEY_ID:SECRET_ACCESS_KEY > /etc/passwd-s3fs
chmod 600 /etc/passwd-s3fs
```
Note 2: You may also need to make sure the `netfs` service is started on boot
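What "starting netfs on boot" looks like varies by distribution; a sketch (the service names below are assumptions):
```
# SysV-style systems (e.g. CentOS 6)
sudo chkconfig netfs on
# systemd-based systems typically rely on remote-fs.target instead
sudo systemctl enable remote-fs.target
```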
Limitations
-----------
## Limitations
Generally S3 cannot offer the same performance or semantics as a local file system. More specifically:
* random writes or appends to files require rewriting the entire file
* random writes or appends to files require rewriting the entire object, optimized with multi-part upload copy
* metadata operations such as listing directories have poor performance due to network latency
* [eventual consistency](https://en.wikipedia.org/wiki/Eventual_consistency) can temporarily yield stale data ([Amazon S3 Data Consistency Model](http://docs.aws.amazon.com/AmazonS3/latest/dev/Introduction.html#ConsistencyModel))
* [eventual consistency](https://en.wikipedia.org/wiki/Eventual_consistency) can temporarily yield stale data ([Amazon S3 Data Consistency Model](https://docs.aws.amazon.com/AmazonS3/latest/dev/Introduction.html#ConsistencyModel))
* no atomic renames of files or directories
* no coordination between multiple clients mounting the same bucket
* no hard links
* inotify detects only local modifications, not external ones by other clients or tools
References
----------
## References
* [goofys](https://github.com/kahing/goofys) - similar to s3fs but has better performance and less POSIX compatibility
* [s3backer](https://github.com/archiecobbs/s3backer) - mount an S3 bucket as a single file
* [s3fs-python](https://fedorahosted.org/s3fs/) - an older and less complete implementation written in Python
* [S3Proxy](https://github.com/andrewgaul/s3proxy) - combine with s3fs to mount EMC Atmos, Microsoft Azure, and OpenStack Swift buckets
* [s3ql](https://bitbucket.org/nikratio/s3ql/) - similar to s3fs but uses its own object format
* [S3Proxy](https://github.com/gaul/s3proxy) - combine with s3fs to mount Backblaze B2, EMC Atmos, Microsoft Azure, and OpenStack Swift buckets
* [s3ql](https://github.com/s3ql/s3ql/) - similar to s3fs but uses its own object format
* [YAS3FS](https://github.com/danilop/yas3fs) - similar to s3fs but uses SNS to allow multiple clients to mount a bucket
Frequently Asked Questions
--------------------------
* [FAQ wiki page](https://github.com/s3fs-fuse/s3fs-fuse/wiki/FAQ)
## Frequently Asked Questions
License
-------
* [FAQ wiki page](https://github.com/s3fs-fuse/s3fs-fuse/wiki/FAQ)
* [s3fs on Stack Overflow](https://stackoverflow.com/questions/tagged/s3fs)
* [s3fs on Server Fault](https://serverfault.com/questions/tagged/s3fs)
## License
Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>
Licensed under the GNU GPL version 2

View File

@ -20,7 +20,7 @@
dnl Process this file with autoconf to produce a configure script.
AC_PREREQ(2.59)
AC_INIT(s3fs, 1.81)
AC_INIT(s3fs, 1.87)
AC_CONFIG_HEADER([config.h])
AC_CANONICAL_SYSTEM
@ -33,12 +33,17 @@ AC_CHECK_HEADERS([sys/xattr.h])
AC_CHECK_HEADERS([attr/xattr.h])
AC_CHECK_HEADERS([sys/extattr.h])
CXXFLAGS="$CXXFLAGS -Wall -D_FILE_OFFSET_BITS=64"
CXXFLAGS="$CXXFLAGS -Wall -D_FILE_OFFSET_BITS=64 -D_FORTIFY_SOURCE=2"
dnl ----------------------------------------------
dnl For OSX
dnl For macOS
dnl ----------------------------------------------
case "$target" in
*-cygwin* )
# Do something specific for windows using winfsp
CXXFLAGS="$CXXFLAGS -D_GNU_SOURCE=1"
min_fuse_version=2.8
;;
*-darwin* )
# Do something specific for mac
min_fuse_version=2.7.3
@ -176,13 +181,13 @@ dnl
dnl For PKG_CONFIG before checking nss/gnutls.
dnl this is redundant checking, but we need checking before following.
dnl
PKG_CHECK_MODULES([common_lib_checking], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6])
PKG_CHECK_MODULES([common_lib_checking], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 ])
AC_MSG_CHECKING([compile s3fs with])
case "${auth_lib}" in
openssl)
AC_MSG_RESULT(OpenSSL)
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9])
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])
;;
gnutls)
AC_MSG_RESULT(GnuTLS-gcrypt)
@ -232,7 +237,7 @@ dnl ----------------------------------------------
dnl malloc_trim function
AC_CHECK_FUNCS([malloc_trim])
dnl clock_gettime function(osx)
dnl clock_gettime function(macos)
AC_SEARCH_LIBS([clock_gettime],[rt posix4])
AC_CHECK_FUNCS([clock_gettime])
@ -259,6 +264,51 @@ AC_COMPILE_IFELSE(
]
)
dnl ----------------------------------------------
dnl check CURLoption
dnl ----------------------------------------------
dnl CURLOPT_TCP_KEEPALIVE (is supported by 7.25.0 and later)
AC_MSG_CHECKING([checking CURLOPT_TCP_KEEPALIVE])
AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM([[#include <curl/curl.h>]],
[[CURLoption opt = CURLOPT_TCP_KEEPALIVE;]])
],
[AC_DEFINE(HAVE_CURLOPT_TCP_KEEPALIVE, 1, [Define to 1 if libcurl has CURLOPT_TCP_KEEPALIVE CURLoption])
AC_MSG_RESULT(yes)
],
[AC_DEFINE(HAVE_CURLOPT_TCP_KEEPALIVE, 0, [Define to 1 if libcurl has CURLOPT_TCP_KEEPALIVE CURLoption])
AC_MSG_RESULT(no)
]
)
dnl CURLOPT_SSL_ENABLE_ALPN (is supported by 7.36.0 and later)
AC_MSG_CHECKING([checking CURLOPT_SSL_ENABLE_ALPN])
AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM([[#include <curl/curl.h>]],
[[CURLoption opt = CURLOPT_SSL_ENABLE_ALPN;]])
],
[AC_DEFINE(HAVE_CURLOPT_SSL_ENABLE_ALPN, 1, [Define to 1 if libcurl has CURLOPT_SSL_ENABLE_ALPN CURLoption])
AC_MSG_RESULT(yes)
],
[AC_DEFINE(HAVE_CURLOPT_SSL_ENABLE_ALPN, 0, [Define to 1 if libcurl has CURLOPT_SSL_ENABLE_ALPN CURLoption])
AC_MSG_RESULT(no)
]
)
dnl CURLOPT_KEEP_SENDING_ON_ERROR (is supported by 7.51.0 and later)
AC_MSG_CHECKING([checking CURLOPT_KEEP_SENDING_ON_ERROR])
AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM([[#include <curl/curl.h>]],
[[CURLoption opt = CURLOPT_KEEP_SENDING_ON_ERROR;]])
],
[AC_DEFINE(HAVE_CURLOPT_KEEP_SENDING_ON_ERROR, 1, [Define to 1 if libcurl has CURLOPT_KEEP_SENDING_ON_ERROR CURLoption])
AC_MSG_RESULT(yes)
],
[AC_DEFINE(HAVE_CURLOPT_KEEP_SENDING_ON_ERROR, 0, [Define to 1 if libcurl has CURLOPT_KEEP_SENDING_ON_ERROR CURLoption])
AC_MSG_RESULT(no)
]
)
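After configuration, one way to verify which of these libcurl options were detected is to inspect the generated config.h (a quick sketch assuming the usual autotools flow):
```
./autogen.sh && ./configure
# each HAVE_CURLOPT_* macro is defined to 1 if the installed libcurl supports it, 0 otherwise
grep 'HAVE_CURLOPT_' config.h
```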
dnl ----------------------------------------------
dnl output files
dnl ----------------------------------------------
@ -268,10 +318,10 @@ dnl ----------------------------------------------
dnl short commit hash
dnl ----------------------------------------------
AC_CHECK_PROG([GITCMD], [git --version], [yes], [no])
AC_CHECK_FILE([.git], [DOTGITDIR=yes], [DOTGITDIR=no])
AS_IF([test -d .git], [DOTGITDIR=yes], [DOTGITDIR=no])
AC_MSG_CHECKING([github short commit hash])
if test x${GITCMD} = xyes -a x${DOTGITDIR} = xyes; then
if test "x${GITCMD}" = "xyes" -a "x${DOTGITDIR}" = "xyes"; then
GITCOMMITHASH=`git rev-parse --short HEAD`
elif test -f default_commit_hash; then
GITCOMMITHASH=`cat default_commit_hash`

View File

@ -6,7 +6,7 @@ S3FS \- FUSE-based file system backed by Amazon S3
.TP
\fBs3fs bucket[:/path] mountpoint \fP [options]
.TP
\fBs3fs mountpoint \fP [options(must specify bucket= option)]
\fBs3fs mountpoint \fP [options (must specify bucket= option)]
.SS unmounting
.TP
\fBumount mountpoint
@ -14,12 +14,16 @@ For root.
.TP
\fBfusermount -u mountpoint
For unprivileged user.
.SS utility mode ( remove interrupted multipart uploading objects )
.SS utility mode (remove interrupted multipart uploading objects)
.TP
\fBs3fs \-u bucket
\fBs3fs --incomplete-mpu-list (-u) bucket
.TP
\fBs3fs --incomplete-mpu-abort[=all | =<expire date format>] bucket
.SH DESCRIPTION
s3fs is a FUSE filesystem that allows you to mount an Amazon S3 bucket as a local filesystem. It stores files natively and transparently in S3 (i.e., you can use other programs to access the same files).
.SH AUTHENTICATION
s3fs supports the standard AWS credentials file (https://docs.aws.amazon.com/cli/latest/userguide/cli-config-files.html) stored in `${HOME}/.aws/credentials`.
Alternatively, s3fs supports a custom passwd file. Only the AWS credentials file format can be used when an AWS session token is required.
The s3fs password file has this format (use this format if you have only one set of credentials):
.RS 4
\fBaccessKeyId\fP:\fBsecretAccessKey\fP
@ -35,6 +39,8 @@ Password files can be stored in two locations:
\fB/etc/passwd-s3fs\fP [0640]
\fB$HOME/.passwd-s3fs\fP [0600]
.RE
.PP
s3fs also recognizes the \fBAWSACCESSKEYID\fP and \fBAWSSECRETACCESSKEY\fP environment variables.
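For example, credentials can be supplied through the environment instead of a password file (a sketch; the key values are placeholders):
```
AWSACCESSKEYID=YOUR_ACCESS_KEY_ID \
AWSSECRETACCESSKEY=YOUR_SECRET_ACCESS_KEY \
s3fs mybucket /path/to/mountpoint
```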
.SH OPTIONS
.SS "general options"
.TP
@ -48,21 +54,20 @@ print version
FUSE foreground option - do not run as daemon.
.TP
\fB\-s\fR
FUSE singlethreaded option (disables multi-threaded operation)
FUSE single-threaded option (disables multi-threaded operation)
.SS "mount options"
.TP
All s3fs options must be given in the form where "opt" is:
<option_name>=<option_value>
.TP
\fB\-o\fR bucket
if it is not specified bucket name(and path) in command line, must specify this option after \-o option for bucket name.
if the bucket name (and path) is not specified on the command line, you must supply it with this option after \-o.
.TP
\fB\-o\fR default_acl (default="private")
the default canned acl to apply to all written s3 objects, e.g., "private", "public-read".
empty string means do not send header.
see http://aws.amazon.com/documentation/s3/ for the full list of canned acls.
see https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl for the full list of canned acls.
.TP
\fB\-o\fR retries (default="2")
\fB\-o\fR retries (default="5")
number of times to retry a failed S3 transaction.
.TP
\fB\-o\fR use_cache (default="" which means disabled)
@ -75,10 +80,10 @@ If this option is not specified, it will be created at runtime when the cache di
\fB\-o\fR del_cache - delete local file cache
delete local file cache when s3fs starts and exits.
.TP
\fB\-o\fR storage_class (default is standard)
\fB\-o\fR storage_class (default="standard")
store object with specified storage class.
this option replaces the old option use_rrs.
Possible values: standard, standard_ia, and reduced_redundancy.
Possible values: standard, standard_ia, onezone_ia, reduced_redundancy, intelligent_tiering, and glacier.
.TP
\fB\-o\fR use_rrs (default is disable)
use Amazon's Reduced Redundancy Storage.
@ -89,19 +94,19 @@ this option has been replaced by new storage_class option.
\fB\-o\fR use_sse (default is disable)
Specify one of three types of Amazon's Server-Side Encryption: SSE-S3, SSE-C or SSE-KMS. SSE-S3 uses Amazon S3-managed encryption keys, SSE-C uses customer-provided encryption keys, and SSE-KMS uses the master key which you manage in AWS KMS.
Specifying "use_sse" or "use_sse=1" enables the SSE-S3 type ("use_sse=1" is the old-style parameter).
Case of setting SSE-C, you can specify "use_sse=custom", "use_sse=custom:<custom key file path>" or "use_sse=<custom key file path>"(only <custom key file path> specified is old type parameter).
Case of setting SSE-C, you can specify "use_sse=custom", "use_sse=custom:<custom key file path>" or "use_sse=<custom key file path>" (only <custom key file path> specified is old type parameter).
You can use "c" as an abbreviation for "custom".
The custom key file must have 600 permissions. The file can contain multiple lines; each line is one SSE-C key.
The first line in the file is used as the Customer-Provided Encryption Key for uploading and changing headers, etc.
If there are keys after the first line, those are used for downloading objects that were encrypted with a key other than the first.
This way you can keep all SSE-C keys in the file as an SSE-C key history.
If you specify "custom"("c") without file path, you need to set custom key by load_sse_c option or AWSSSECKEYS environment.(AWSSSECKEYS environment has some SSE-C keys with ":" separator.)
If you specify "custom" ("c") without file path, you need to set custom key by load_sse_c option or AWSSSECKEYS environment. (AWSSSECKEYS environment has some SSE-C keys with ":" separator.)
This option is used to decide the SSE type.
If you do not want to encrypt an object when uploading, but need to decrypt an encrypted object when downloading, you can use the load_sse_c option instead of this option.
For setting SSE-KMS, specify "use_sse=kmsid" or "use_sse=kmsid:<kms id>".
You can use "k" for short "kmsid".
If you can specify SSE-KMS type with your <kms id> in AWS KMS, you can set it after "kmsid:"(or "k:").
If you specify only "kmsid"("k"), you need to set AWSSSEKMSID environment which value is <kms id>.
To use SSE-KMS with your own <kms id> from AWS KMS, set it after "kmsid:" (or "k:").
If you specify only "kmsid" ("k"), you need to set the AWSSSEKMSID environment variable, whose value is the <kms id>.
Note that you cannot use a KMS id from a region different from the EC2 instance's region.
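As an illustration, SSE-C might be configured with a key file like this (a sketch; the key value and paths are hypothetical placeholders):
```
# one SSE-C key per line; the first line is used for uploads
echo "MY_SSE_C_KEY_PLACEHOLDER" > /path/to/sse.keys
chmod 600 /path/to/sse.keys
s3fs mybucket /path/to/mountpoint -o use_sse=custom:/path/to/sse.keys
```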
.TP
\fB\-o\fR load_sse_c - specify SSE-C keys
@ -115,12 +120,12 @@ AWSSSECKEYS environment is as same as this file contents.
specify the path to the password file, which takes precedence over the password in $HOME/.passwd-s3fs and /etc/passwd-s3fs
.TP
\fB\-o\fR ahbe_conf (default="" which means disabled)
This option specifies the configuration file path which file is the additional HTTP header by file(object) extension.
This option specifies the path to a configuration file that defines additional HTTP headers per file (object) extension.
The configuration file format is below:
-----------
line = [file suffix or regex] HTTP-header [HTTP-values]
file suffix = file(object) suffix, if this field is empty, it means "reg:(.*)".(=all object).
regex = regular expression to match the file(object) path. this type starts with "reg:" prefix.
file suffix = file (object) suffix; if this field is empty, it means "reg:(.*)" (= all objects).
regex = regular expression to match the file (object) path. this type starts with "reg:" prefix.
HTTP-header = additional HTTP header name
HTTP-values = additional HTTP header value
-----------
@ -133,6 +138,10 @@ This option specifies the configuration file path which file is the additional H
A sample configuration file is uploaded in "test" directory.
If you use this option to set the "Content-Encoding" HTTP header, please take care per RFC 2616.
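For illustration, a small hypothetical ahbe_conf file and a mount using it (the suffixes, headers, and paths are examples only):
```
cat > /path/to/ahbe.conf <<'EOF'
# [file suffix or regex] HTTP-header [HTTP-values]
.jpg           Cache-Control     max-age=86400
reg:.*\.gz$    Content-Encoding  gzip
EOF
s3fs mybucket /path/to/mountpoint -o ahbe_conf=/path/to/ahbe.conf
```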
.TP
\fB\-o\fR profile (default="default")
Choose a profile from ${HOME}/.aws/credentials to authenticate against S3.
Note that this format matches the AWS CLI format and differs from the s3fs passwd format.
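For example, to authenticate with a named profile from ${HOME}/.aws/credentials (the profile name is a placeholder):
```
s3fs mybucket /path/to/mountpoint -o profile=myprofile
```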
.TP
\fB\-o\fR public_bucket (default="" which means disabled)
anonymously mount a public bucket when set to 1, ignores the $HOME/.passwd-s3fs and /etc/passwd-s3fs files.
S3 does not allow copy object api for anonymous users, then s3fs sets nocopyapi option automatically when public_bucket=1 option is specified.
@ -140,90 +149,113 @@ S3 does not allow copy object api for anonymous users, then s3fs sets nocopyapi
\fB\-o\fR connect_timeout (default="300" seconds)
time to wait for connection before giving up.
.TP
\fB\-o\fR readwrite_timeout (default="60" seconds)
\fB\-o\fR readwrite_timeout (default="120" seconds)
time to wait between read/write activity before giving up.
.TP
\fB\-o\fR max_stat_cache_size (default="1000" entries (about 4MB))
maximum number of entries in the stat cache
\fB\-o\fR list_object_max_keys (default="1000")
specify the maximum number of keys returned by the S3 list object API. The default is 1000. You can set this value to 1000 or more.
.TP
\fB\-o\fR stat_cache_expire (default is no expire)
specify expire time(seconds) for entries in the stat cache. This expire time indicates the time since stat cached.
\fB\-o\fR max_stat_cache_size (default="100,000" entries (about 40MB))
maximum number of entries in the stat cache and symbolic link cache.
.TP
\fB\-o\fR stat_cache_interval_expire (default is no expire)
specify expire time(seconds) for entries in the stat cache. This expire time is based on the time from the last access time of the stat cache.
\fB\-o\fR stat_cache_expire (default is 900)
specify expire time (seconds) for entries in the stat cache and symbolic link cache. This expire time indicates the time since cached.
.TP
\fB\-o\fR stat_cache_interval_expire (default is 900)
specify expire time (seconds) for entries in the stat cache and symbolic link cache. This expire time is based on the time from the last access time of those cache.
This option is exclusive with stat_cache_expire, and is left for compatibility with older versions.
.TP
\fB\-o\fR enable_noobj_cache (default is disable)
enable cache entries for the object which does not exist.
s3fs always has to check whether file(or sub directory) exists under object(path) when s3fs does some command, since s3fs has recognized a directory which does not exist and has files or sub directories under itself.
s3fs always has to check whether a file (or subdirectory) exists under an object (path) when executing a command, since s3fs recognizes a directory which does not exist yet has files or subdirectories under it.
This increases ListBucket requests and hurts performance.
You can specify this option for performance, s3fs memorizes in stat cache that the object(file or directory) does not exist.
You can specify this option for performance; s3fs remembers in its stat cache that the object (file or directory) does not exist.
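A sketch combining the cache-related options above (the values are illustrative, not recommendations):
```
# 5-minute expiry, a 50,000-entry cache, and caching of missing objects
s3fs mybucket /path/to/mountpoint -o stat_cache_expire=300 -o max_stat_cache_size=50000 -o enable_noobj_cache
```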
.TP
\fB\-o\fR no_check_certificate (by default this option is disabled)
do not check ssl certificate.
server certificate won't be checked against the available certificate authorities.
.TP
\fB\-o\fR nodnscache - disable dns cache.
s3fs is always using dns cache, this option make dns cache disable.
\fB\-o\fR ssl_verify_hostname (default="2")
When 0, do not verify the SSL certificate against the hostname.
.TP
\fB\-o\fR nosscache - disable ssl session cache.
s3fs is always using ssl session cache, this option make ssl session cache disable.
\fB\-o\fR nodnscache - disable DNS cache.
s3fs always uses the DNS cache; this option disables the DNS cache.
.TP
\fB\-o\fR nosscache - disable SSL session cache.
s3fs always uses the SSL session cache; this option disables the SSL session cache.
.TP
\fB\-o\fR multireq_max (default="20")
maximum number of parallel requests for listing objects.
.TP
\fB\-o\fR parallel_count (default="5")
number of parallel requests for uploading big objects.
s3fs uploads large object(default:over 20MB) by multipart post request, and sends parallel requests.
s3fs uploads large objects (over 20MB) via multipart POST requests, sending several requests in parallel.
This option limits the number of parallel requests s3fs makes at once.
Set this value according to your CPU and network bandwidth.
.TP
\fB\-o\fR multipart_size(default="10"(10MB))
number of one part size in multipart uploading request.
The default size is 10MB(10485760byte), minimum value is 5MB(5242880byte).
Specify number of MB and over 5(MB).
\fB\-o\fR multipart_size (default="10")
part size, in MB, for each multipart request.
The minimum value is 5 MB and the maximum value is 5 GB.
.TP
\fB\-o\fR ensure_diskfree(default the same as multipart_size value)
\fB\-o\fR ensure_diskfree (default 0)
sets, in MB, the amount of free disk space to preserve. This option is the threshold of free space on the disk that s3fs uses for cache files.
s3fs makes file for downloading, and uploading and caching files.
s3fs creates local files when downloading, uploading and caching.
If the free disk space is smaller than this value, s3fs avoids using disk space as much as possible, in exchange for performance.
.TP
\fB\-o\fR singlepart_copy_limit (default="512")
maximum size, in MB, of a single-part copy before trying
multipart copy.
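For example, the upload-tuning options above might be combined as follows (the values are illustrative only):
```
# 64 MB parts, 10 parallel requests, single-part copy up to 1 GB
s3fs mybucket /path/to/mountpoint -o multipart_size=64 -o parallel_count=10 -o singlepart_copy_limit=1024
```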
.TP
\fB\-o\fR host (default="https://s3.amazonaws.com")
Set a non-Amazon host, e.g., https://example.com.
.TP
\fB\-o\fR servicepath (default="/")
Set a service path when the non-Amazon host requires a prefix.
.TP
\fB\-o\fR url (default="https://s3.amazonaws.com")
sets the url to use to access Amazon S3. If you want to use HTTP, then you can set "url=http://s3.amazonaws.com".
If you start s3fs without specifying the url option, s3fs will check the bucket using https://s3.amazonaws.com.
And when bucket check fails, s3fs retries the bucket check using http://s3.amazonaws.com.
This fallback is retained for backward compatibility.
If you do not use https, please specify the URL with the url option.
.TP
\fB\-o\fR endpoint (default="us-east-1")
sets the endpoint to use.
sets the endpoint to use on signature version 4.
If this option is not specified, s3fs uses the "us-east-1" region as the default.
If s3fs cannot connect to the region specified by this option, it fails to run.
However, if you do not specify this option and the default region cannot be reached, s3fs automatically retries the connection against another region.
s3fs can discover the correct region name because the S3 server reports it in its error response.
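Putting the host-related options together (the URL below is the same placeholder used elsewhere on this page; the region is an example):
```
# non-Amazon endpoint with path-style requests
s3fs mybucket /path/to/mountpoint -o url=https://url.to.s3/ -o use_path_request_style
# Amazon bucket in a region other than us-east-1, with v4 signatures
s3fs mybucket /path/to/mountpoint -o endpoint=us-west-2
```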
.TP
\fB\-o\fR sigv2 (default is signature version 4)
sets signing AWS requests by sing Signature Version 2.
sets signing AWS requests by using Signature Version 2.
.TP
\fB\-o\fR mp_umask (default is "0000")
sets umask for the mount point directory.
If allow_other option is not set, s3fs allows access to the mount point only to the owner.
In the opposite case s3fs allows access to all users as the default.
But if you set the allow_other with this option, you can control permissions of the mount point by this option like umask.
But if you set the allow_other with this option, you can control the permissions of the mount point by this option like umask.
.TP
\fB\-o\fR umask (default is "0000")
sets umask for files under the mountpoint. This can allow
users other than the mounting user to read and write to files
that they did not create.
.TP
\fB\-o\fR nomultipart - disable multipart uploads
.TP
\fB\-o\fR enable_content_md5 ( default is disable )
verifying uploaded data without multipart by content-md5 header.
Enable to send "Content-MD5" header when uploading a object without multipart posting.
If this option is enabled, it has some influences on a performance of s3fs when uploading small object.
Because s3fs always checks MD5 when uploading large object, this option does not affect on large object.
\fB\-o\fR enable_content_md5 (default is disable)
Allow S3 server to check data integrity of uploads via the Content-MD5 header.
This can add CPU overhead to transfers.
.TP
\fB\-o\fR iam_role ( default is no IAM role )
\fB\-o\fR ecs (default is disable)
This option instructs s3fs to query the ECS container credential metadata address instead of the instance metadata address.
.TP
\fB\-o\fR iam_role (default is no IAM role)
This option requires the IAM role name or "auto". If you specify "auto", s3fs will automatically use the IAM role name assigned to the instance. If you specify this option without any argument, it is the same as specifying "auto".
.TP
\fB\-o\fR use_xattr ( default is not handling the extended attribute )
Enable to handle the extended attribute(xattrs).
\fB\-o\fR ibm_iam_auth (default is not using IBM IAM authentication)
This option instructs s3fs to use IBM IAM authentication. In this mode, the AWSAccessKey and AWSSecretKey will be used as IBM's Service-Instance-ID and APIKey, respectively.
.TP
\fB\-o\fR ibm_iam_endpoint (default is https://iam.bluemix.net)
Sets the URL to use for IBM IAM authentication.
.TP
\fB\-o\fR use_xattr (default is not handling the extended attribute)
Enable to handle the extended attribute (xattrs).
If you set this option, you can use extended attributes.
For example, encfs and ecryptfs need extended attribute support.
Notice: if s3fs handles extended attributes, s3fs cannot work with the copy command's preserve=mode option.
@ -232,13 +264,18 @@ Notice: if s3fs handles the extended attribute, s3fs can not work to copy comman
disable registering xml name space for response of ListBucketResult and ListVersionsResult etc. Default name space is looked up from "http://s3.amazonaws.com/doc/2006-03-01".
This option should not be specified now, because s3fs looks up xmlns automatically after v1.66.
.TP
\fB\-o\fR nomixupload - disable copy in multipart uploads.
Disables the use of PUT (copy api) when multipart uploading large objects.
By default, when doing multipart upload, the range of unchanged data will use PUT (copy api) whenever possible.
When nocopyapi or norenameapi is specified, use of PUT (copy api) is invalidated even if this option is not specified.
.TP
\fB\-o\fR nocopyapi - for other incomplete compatibility object storage.
For a distributed object storage which is compatibility S3 API without PUT(copy api).
If you set this option, s3fs do not use PUT with "x-amz-copy-source"(copy api). Because traffic is increased 2-3 times by this option, we do not recommend this.
For distributed object storage that is compatible with the S3 API but lacks PUT (copy api).
If you set this option, s3fs does not use PUT with "x-amz-copy-source" (copy api). Because traffic increases 2-3 times with this option, we do not recommend it.
.TP
\fB\-o\fR norenameapi - for other incomplete compatibility object storage.
For a distributed object storage which is compatibility S3 API without PUT(copy api).
This option is a subset of nocopyapi option. The nocopyapi option does not use copy-api for all command(ex. chmod, chown, touch, mv, etc), but this option does not use copy-api for only rename command(ex. mv).
For distributed object storage that is compatible with the S3 API but lacks PUT (copy api).
This option is a subset of the nocopyapi option. The nocopyapi option avoids copy-api for all commands (e.g., chmod, chown, touch, mv, etc.), while this option avoids copy-api only for the rename command (e.g., mv).
If this option is specified with nocopyapi, then s3fs ignores it.
.TP
\fB\-o\fR use_path_request_style (use legacy API calling style)
@ -249,10 +286,14 @@ Usually s3fs outputs of the User-Agent in "s3fs/<version> (commit hash <hash>; <
If this option is specified, s3fs suppresses the output of the User-Agent.
.TP
\fB\-o\fR cipher_suites
Customize TLS cipher suite list. Expects a colon separated list of cipher suite names.
Customize the list of TLS cipher suites. Expects a colon separated list of cipher suite names.
A list of available cipher suites, depending on your TLS engine, can be found on the CURL library documentation:
https://curl.haxx.se/docs/ssl-ciphers.html
.TP
\fB\-o\fR instance_name
The instance name of the current s3fs mountpoint.
This name will be added to logging messages and user agent headers sent by s3fs.
.TP
\fB\-o\fR complement_stat (complement lack of file/directory mode)
s3fs complements lack of information about file/directory mode if a file or a directory object does not have x-amz-meta-mode header.
By default, s3fs does not complement stat information for an object, so the object cannot be listed or modified.
@ -268,13 +309,55 @@ However, if there is a directory object other than "dir/" in the bucket, specify
s3fs may not be able to recognize the object correctly if an object created by s3fs exists in the bucket.
Please use this option when the directory in the bucket is only "dir/" object.
.TP
\fB\-o\fR use_wtf8 - support arbitrary file system encoding.
S3 requires all object names to be valid UTF-8. But some
clients, notably Windows NFS clients, use their own encoding.
This option re-encodes invalid UTF-8 object names into valid
UTF-8 by mapping offending codes into a 'private' codepage of the
Unicode set.
Useful on clients not using UTF-8 as their file system encoding.
.TP
\fB\-o\fR use_session_token - indicate that session token should be provided.
If credentials are provided by environment variables, this switch
forces a presence check of the AWSSESSIONTOKEN variable;
otherwise an error is returned.
.TP
\fB\-o\fR requester_pays (default is disable)
This option instructs s3fs to enable requests involving Requester Pays buckets (It includes the 'x-amz-request-payer=requester' entry in the request header).
.TP
\fB\-o\fR mime (default is "/etc/mime.types")
Specify the path of the mime.types file.
If this option is not specified, the existence of "/etc/mime.types" is checked, and that file is loaded as mime information.
If this file does not exist on macOS, then "/etc/apache2/mime.types" is checked as well.
.TP
\fB\-o\fR dbglevel (default="crit")
Set the debug message level. set value as crit(critical), err(error), warn(warning), info(information) to debug level. default debug level is critical.
Set the debug message level. set value as crit (critical), err (error), warn (warning), info (information) to debug level. default debug level is critical.
If s3fs is run with the "-d" option, the debug level is set to information.
When s3fs catches the SIGUSR2 signal, the debug level is bumped up.
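For example (a sketch; the pgrep pattern must match how s3fs was started):
```
# mount with info-level debugging in the foreground
s3fs mybucket /path/to/mountpoint -o dbglevel=info -f
# or bump the debug level of an already running instance
kill -SIGUSR2 $(pgrep -f '^s3fs mybucket')
```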
.TP
\fB\-o\fR curldbg - put curl debug message
Put the debug message from libcurl when this option is specified.
Specify "normal" or "body" for the parameter.
If the parameter is omitted, it is the same as "normal".
If "body" is specified, some API communication body data will be output in addition to the debug message output as "normal".
.TP
\fB\-o\fR set_check_cache_sigusr1 (default is stdout)
If the cache is enabled, you can check the integrity of the cache file and the cache file's stats info file.
When this option is specified, sending the SIGUSR1 signal to the s3fs process checks the cache status at that time.
This option can take a file path as parameter to output the check result to that file.
The file path parameter can be omitted. If omitted, the result will be output to stdout or syslog.
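A sketch of how this might be used (all paths are hypothetical):
```
# mount with the cache check enabled, writing results to a log file
s3fs mybucket /path/to/mountpoint -o use_cache=/tmp/s3fs-cache -o set_check_cache_sigusr1=/tmp/s3fs-check.log
# trigger an integrity check of the cache file at any time
kill -SIGUSR1 $(pgrep -f '^s3fs mybucket')
```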
.SS "utility mode options"
.TP
\fB\-u\fR or \fB\-\-incomplete\-mpu\-list\fR
Lists multipart incomplete objects uploaded to the specified bucket.
.TP
\fB\-\-incomplete\-mpu\-abort\fR all or date format (default="24H")
Delete the multipart incomplete object uploaded to the specified bucket.
If "all" is specified for this option, all multipart incomplete objects will be deleted.
If you specify no argument as an option, objects older than 24 hours (24H) will be deleted (This is the default value).
You can specify an optional date format.
It can be specified as year, month, day, hour, minute, second, and it is expressed as "Y", "M", "D", "h", "m", "s" respectively.
For example, "1Y6M10D12h30m30s".
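For example (the bucket name is a placeholder):
```
# list incomplete multipart uploads in the bucket
s3fs --incomplete-mpu-list mybucket
# abort incomplete uploads older than one day and twelve hours
s3fs --incomplete-mpu-abort=1D12h mybucket
```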
.SH FUSE/MOUNT OPTIONS
.TP
Most of the generic mount options described in 'man mount' are supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime, noatime, sync, async, dirsync). Filesystems are mounted with '\-onodev,nosuid' by default, which can only be overridden by a privileged user.
@ -282,15 +365,17 @@ Most of the generic mount options described in 'man mount' are supported (ro, rw
There are many FUSE specific mount options that can be specified. e.g. allow_other. See the FUSE README for the full set.
.SH NOTES
.TP
The maximum size of objects that s3fs can handle depends on Amazone S3. For example, up to 5 GB when using single PUT API. And up to 5 TB is supported when Multipart Upload API is used.
The maximum size of objects that s3fs can handle depends on Amazon S3. For example, up to 5 GB when using single PUT API. And up to 5 TB is supported when Multipart Upload API is used.
.TP
If enabled via the "use_cache" option, s3fs automatically maintains a local cache of files in the folder specified by use_cache. Whenever s3fs needs to read or write a file on S3, it first downloads the entire file locally to the folder specified by use_cache and operates on it. When fuse_release() is called, s3fs will re-upload the file to S3 if it has been changed. s3fs uses md5 checksums to minimize downloads from S3.
If enabled via the "use_cache" option, s3fs automatically maintains a local cache of files in the folder specified by use_cache. Whenever s3fs needs to read or write a file on S3, it first downloads the entire file locally to the folder specified by use_cache and operates on it. When fuse_release() is called, s3fs will re-upload the file to S3 if it has been changed. s3fs uses MD5 checksums to minimize downloads from S3.
.TP
The folder specified by use_cache is just a local cache. It can be deleted at any time. s3fs rebuilds it on demand.
.TP
Local file caching works by calculating and comparing md5 checksums (ETag HTTP header).
Local file caching works by calculating and comparing MD5 checksums (ETag HTTP header).
.TP
s3fs leverages /etc/mime.types to "guess" the "correct" content-type based on file name extension. This means that you can copy a website to S3 and serve it up directly from S3 with correct content-types!
.SH SEE ALSO
fuse(8), mount(8), fusermount(1), fstab(5)
.SH BUGS
Due to S3's "eventual consistency" limitations, file creation can and will occasionally fail. Even after a successful create, subsequent reads can fail for an indeterminate time, even after one or more successful reads. Create and read enough files and you will eventually encounter this failure. This is not a flaw in s3fs and it is not something a FUSE wrapper like s3fs can work around. The retries option does not address this issue. Your application must either tolerate or compensate for these failures, for example by retrying creates or reads.
.SH AUTHOR

BIN doc/s3fs.png (new binary file, 5.3 KiB; not shown)

View File

@ -24,7 +24,16 @@ if USE_GNUTLS_NETTLE
AM_CPPFLAGS += -DUSE_GNUTLS_NETTLE
endif
s3fs_SOURCES = s3fs.cpp s3fs.h curl.cpp curl.h cache.cpp cache.h string_util.cpp string_util.h s3fs_util.cpp s3fs_util.h fdcache.cpp fdcache.h common_auth.cpp s3fs_auth.h addhead.cpp addhead.h common.h
s3fs_SOURCES = \
s3fs.cpp \
curl.cpp \
cache.cpp \
string_util.cpp \
s3fs_util.cpp \
fdcache.cpp \
common_auth.cpp \
addhead.cpp \
sighandlers.cpp
if USE_SSL_OPENSSL
s3fs_SOURCES += openssl_auth.cpp
endif
@ -39,6 +48,9 @@ s3fs_LDADD = $(DEPS_LIBS)
noinst_PROGRAMS = test_string_util
test_string_util_SOURCES = string_util.cpp test_string_util.cpp test_util.h
test_string_util_SOURCES = string_util.cpp test_string_util.cpp
TESTS = test_string_util
clang-tidy:
clang-tidy $(s3fs_SOURCES) -- $(DEPS_CFLAGS) $(CPPFLAGS)

View File

@ -18,11 +18,10 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <syslog.h>
#include <assert.h>
#include <curl/curl.h>
#include <sstream>
#include <fstream>
@ -56,7 +55,7 @@ AdditionalHeader::AdditionalHeader()
if(this == AdditionalHeader::get()){
is_enable = false;
}else{
assert(false);
abort();
}
}
@ -65,7 +64,7 @@ AdditionalHeader::~AdditionalHeader()
if(this == AdditionalHeader::get()){
Unload();
}else{
assert(false);
abort();
}
}
@ -85,19 +84,19 @@ bool AdditionalHeader::Load(const char* file)
// read file
string line;
PADDHEAD paddhead;
ADDHEAD *paddhead;
while(getline(AH, line)){
if('#' == line[0]){
continue;
}
if(0 == line.size()){
if(line.empty()){
continue;
}
// load a line
stringstream ss(line);
string key(""); // suffix(key)
string head; // additional HTTP header
string value; // header value
istringstream ss(line);
string key; // suffix(key)
string head; // additional HTTP header
string value; // header value
if(0 == isblank(line[0])){
ss >> key;
}
@ -109,8 +108,8 @@ bool AdditionalHeader::Load(const char* file)
}
// check it
if(0 == head.size()){
if(0 == key.size()){
if(head.empty()){
if(key.empty()){
continue;
}
S3FS_PRN_ERR("file format error: %s key(suffix) is no HTTP header value.", key.c_str());
@ -123,6 +122,7 @@ bool AdditionalHeader::Load(const char* file)
// regex
if(key.size() <= strlen(ADD_HEAD_REGEX)){
S3FS_PRN_ERR("file format error: %s key(suffix) does not have key string.", key.c_str());
delete paddhead;
continue;
}
key = key.substr(strlen(ADD_HEAD_REGEX));
@ -130,8 +130,8 @@ bool AdditionalHeader::Load(const char* file)
// compile
regex_t* preg = new regex_t;
int result;
char errbuf[256];
if(0 != (result = regcomp(preg, key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info
char errbuf[256];
regerror(result, preg, errbuf, sizeof(errbuf));
S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf);
delete preg;
@ -164,12 +164,12 @@ bool AdditionalHeader::Load(const char* file)
return true;
}
void AdditionalHeader::Unload(void)
void AdditionalHeader::Unload()
{
is_enable = false;
for(addheadlist_t::iterator iter = addheadlist.begin(); iter != addheadlist.end(); iter = addheadlist.erase(iter)){
PADDHEAD paddhead = *iter;
for(addheadlist_t::iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
ADDHEAD *paddhead = *iter;
if(paddhead){
if(paddhead->pregex){
regfree(paddhead->pregex);
@ -178,6 +178,7 @@ void AdditionalHeader::Unload(void)
delete paddhead;
}
}
addheadlist.clear();
}
bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const
@ -198,7 +199,7 @@ bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const
// Because to allow duplicate key, and then scanning the entire table.
//
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
const PADDHEAD paddhead = *iter;
const ADDHEAD *paddhead = *iter;
if(!paddhead){
continue;
}
@ -239,19 +240,19 @@ struct curl_slist* AdditionalHeader::AddHeader(struct curl_slist* list, const ch
return list;
}
bool AdditionalHeader::Dump(void) const
bool AdditionalHeader::Dump() const
{
if(!IS_S3FS_LOG_DBG()){
return true;
}
stringstream ssdbg;
int cnt = 1;
ostringstream ssdbg;
int cnt = 1;
ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << endl;
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter, ++cnt){
const PADDHEAD paddhead = *iter;
const ADDHEAD *paddhead = *iter;
ssdbg << " [" << cnt << "] = {" << endl;

View File

@ -31,9 +31,9 @@ typedef struct add_header{
std::string basestring;
std::string headkey;
std::string headvalue;
}ADDHEAD, *PADDHEAD;
}ADDHEAD;
typedef std::vector<PADDHEAD> addheadlist_t;
typedef std::vector<ADDHEAD *> addheadlist_t;
class AdditionalHeader
{

View File

@ -18,7 +18,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <stdio.h>
#include <cstdio>
#include <sys/stat.h>
#include <sys/types.h>
#ifndef HAVE_CLOCK_GETTIME
@ -27,8 +27,7 @@
#include <unistd.h>
#include <stdint.h>
#include <pthread.h>
#include <string.h>
#include <assert.h>
#include <cstring>
#include <syslog.h>
#include <string>
#include <map>
@ -59,7 +58,7 @@ using namespace std;
#ifdef HAVE_CLOCK_GETTIME
static int s3fs_clock_gettime(int clk_id, struct timespec* ts)
{
return clock_gettime(clk_id, ts);
return clock_gettime(static_cast<clockid_t>(clk_id), ts);
}
#else
static int s3fs_clock_gettime(int clk_id, struct timespec* ts)
@ -88,7 +87,7 @@ inline void InitStatCacheTime(struct timespec& ts)
ts.tv_nsec = 0;
}
inline int CompareStatCacheTime(struct timespec& ts1, struct timespec& ts2)
inline int CompareStatCacheTime(const struct timespec& ts1, const struct timespec& ts2)
{
// return -1: ts1 < ts2
// 0: ts1 == ts2
@ -115,7 +114,7 @@ inline bool IsExpireStatCacheTime(const struct timespec& ts, const time_t& expir
}
//
// For cache out
// For stats cache out
//
typedef std::vector<stat_cache_t::iterator> statiterlist_t;
@ -133,6 +132,25 @@ struct sort_statiterlist{
}
};
//
// For symbolic link cache out
//
typedef std::vector<symlink_cache_t::iterator> symlinkiterlist_t;
struct sort_symlinkiterlist{
// ascending order
bool operator()(const symlink_cache_t::iterator& src1, const symlink_cache_t::iterator& src2) const
{
int result = CompareStatCacheTime(src1->second->cache_date, src2->second->cache_date); // use the same as Stats
if(0 == result){
if(src1->second->hit_count < src2->second->hit_count){
result = -1;
}
}
return (result < 0);
}
};
//-------------------------------------------------------------------
// Static
//-------------------------------------------------------------------
@ -142,13 +160,22 @@ pthread_mutex_t StatCache::stat_cache_lock;
//-------------------------------------------------------------------
// Constructor/Destructor
//-------------------------------------------------------------------
StatCache::StatCache() : IsExpireTime(false), IsExpireIntervalType(false), ExpireTime(0), CacheSize(1000), IsCacheNoObject(false)
StatCache::StatCache() : IsExpireTime(false), IsExpireIntervalType(false), ExpireTime(15 * 60), CacheSize(100000), IsCacheNoObject(false)
{
if(this == StatCache::getStatCacheData()){
stat_cache.clear();
pthread_mutex_init(&(StatCache::stat_cache_lock), NULL);
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
#if S3FS_PTHREAD_ERRORCHECK
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
int res;
if(0 != (res = pthread_mutex_init(&StatCache::stat_cache_lock, &attr))){
S3FS_PRN_CRIT("failed to init stat_cache_lock: %d", res);
abort();
}
}else{
assert(false);
abort();
}
}
@ -156,16 +183,20 @@ StatCache::~StatCache()
{
if(this == StatCache::getStatCacheData()){
Clear();
pthread_mutex_destroy(&(StatCache::stat_cache_lock));
int res = pthread_mutex_destroy(&StatCache::stat_cache_lock);
if(res != 0){
S3FS_PRN_CRIT("failed to destroy stat_cache_lock: %d", res);
abort();
}
}else{
assert(false);
abort();
}
}
//-------------------------------------------------------------------
// Methods
//-------------------------------------------------------------------
unsigned long StatCache::GetCacheSize(void) const
unsigned long StatCache::GetCacheSize() const
{
return CacheSize;
}
@ -177,7 +208,7 @@ unsigned long StatCache::SetCacheSize(unsigned long size)
return old;
}
time_t StatCache::GetExpireTime(void) const
time_t StatCache::GetExpireTime() const
{
return (IsExpireTime ? ExpireTime : (-1));
}
@ -191,7 +222,7 @@ time_t StatCache::SetExpireTime(time_t expire, bool is_interval)
return old;
}
time_t StatCache::UnsetExpireTime(void)
time_t StatCache::UnsetExpireTime()
{
time_t old = IsExpireTime ? ExpireTime : (-1);
ExpireTime = 0;
@ -207,45 +238,41 @@ bool StatCache::SetCacheNoObject(bool flag)
return old;
}
void StatCache::Clear(void)
void StatCache::Clear()
{
pthread_mutex_lock(&StatCache::stat_cache_lock);
AutoLock lock(&StatCache::stat_cache_lock);
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); stat_cache.erase(iter++)){
if((*iter).second){
delete (*iter).second;
}
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ++iter){
delete (*iter).second;
}
stat_cache.clear();
S3FS_MALLOCTRIM(0);
pthread_mutex_unlock(&StatCache::stat_cache_lock);
}
bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce)
bool StatCache::GetStat(const string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce)
{
bool is_delete_cache = false;
string strpath = key;
pthread_mutex_lock(&StatCache::stat_cache_lock);
AutoLock lock(&StatCache::stat_cache_lock);
stat_cache_t::iterator iter = stat_cache.end();
if(overcheck && '/' != strpath[strpath.length() - 1]){
strpath += "/";
iter = stat_cache.find(strpath.c_str());
iter = stat_cache.find(strpath);
}
if(iter == stat_cache.end()){
strpath = key;
iter = stat_cache.find(strpath.c_str());
iter = stat_cache.find(strpath);
}
if(iter != stat_cache.end() && (*iter).second){
stat_cache_entry* ent = (*iter).second;
if(!IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){
if(ent->noobjcache){
pthread_mutex_unlock(&StatCache::stat_cache_lock);
if(!IsCacheNoObject){
// need to delete this cache.
DelStat(strpath);
DelStat(strpath, /*lock_already_held=*/ true);
}else{
// noobjcache = true means no object.
}
@ -255,10 +282,10 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
string stretag;
if(petag){
// find & check ETag
for(headers_t::iterator iter = ent->meta.begin(); iter != ent->meta.end(); ++iter){
string tag = lower(iter->first);
for(headers_t::iterator hiter = ent->meta.begin(); hiter != ent->meta.end(); ++hiter){
string tag = lower(hiter->first);
if(tag == "etag"){
stretag = iter->second;
stretag = hiter->second;
if('\0' != petag[0] && 0 != strcmp(petag, stretag.c_str())){
is_delete_cache = true;
}
@ -268,12 +295,12 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
}
if(is_delete_cache){
// not hit by different ETag
S3FS_PRN_DBG("stat cache not hit by ETag[path=%s][time=%jd.%09ld][hit count=%lu][ETag(%s)!=(%s)]",
strpath.c_str(), (intmax_t)(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count, petag ? petag : "null", stretag.c_str());
S3FS_PRN_DBG("stat cache not hit by ETag[path=%s][time=%lld.%09ld][hit count=%lu][ETag(%s)!=(%s)]",
strpath.c_str(), static_cast<long long>(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count, petag ? petag : "null", stretag.c_str());
}else{
// hit
S3FS_PRN_DBG("stat cache hit [path=%s][time=%jd.%09ld][hit count=%lu]",
strpath.c_str(), (intmax_t)(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count);
S3FS_PRN_DBG("stat cache hit [path=%s][time=%lld.%09ld][hit count=%lu]",
strpath.c_str(), static_cast<long long>(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count);
if(pst!= NULL){
*pst= ent->stbuf;
@ -289,7 +316,6 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
if(IsExpireIntervalType){
SetStatCacheTime(ent->cache_date);
}
pthread_mutex_unlock(&StatCache::stat_cache_lock);
return true;
}
@ -298,15 +324,14 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
is_delete_cache = true;
}
}
pthread_mutex_unlock(&StatCache::stat_cache_lock);
if(is_delete_cache){
DelStat(strpath);
DelStat(strpath, /*lock_already_held=*/ true);
}
return false;
}
bool StatCache::IsNoObjectCache(string& key, bool overcheck)
bool StatCache::IsNoObjectCache(const string& key, bool overcheck)
{
bool is_delete_cache = false;
string strpath = key;
@ -315,16 +340,16 @@ bool StatCache::IsNoObjectCache(string& key, bool overcheck)
return false;
}
pthread_mutex_lock(&StatCache::stat_cache_lock);
AutoLock lock(&StatCache::stat_cache_lock);
stat_cache_t::iterator iter = stat_cache.end();
if(overcheck && '/' != strpath[strpath.length() - 1]){
strpath += "/";
iter = stat_cache.find(strpath.c_str());
iter = stat_cache.find(strpath);
}
if(iter == stat_cache.end()){
strpath = key;
iter = stat_cache.find(strpath.c_str());
iter = stat_cache.find(strpath);
}
if(iter != stat_cache.end() && (*iter).second) {
@ -332,7 +357,6 @@ bool StatCache::IsNoObjectCache(string& key, bool overcheck)
if((*iter).second->noobjcache){
// noobjcache = true means no object.
SetStatCacheTime((*iter).second->cache_date);
pthread_mutex_unlock(&StatCache::stat_cache_lock);
return true;
}
}else{
@ -340,27 +364,27 @@ bool StatCache::IsNoObjectCache(string& key, bool overcheck)
is_delete_cache = true;
}
}
pthread_mutex_unlock(&StatCache::stat_cache_lock);
if(is_delete_cache){
DelStat(strpath);
DelStat(strpath, /*lock_already_held=*/ true);
}
return false;
}
bool StatCache::AddStat(std::string& key, headers_t& meta, bool forcedir, bool no_truncate)
bool StatCache::AddStat(const std::string& key, headers_t& meta, bool forcedir, bool no_truncate)
{
if(!no_truncate && CacheSize< 1){
return true;
}
S3FS_PRN_INFO3("add stat cache entry[path=%s]", key.c_str());
pthread_mutex_lock(&StatCache::stat_cache_lock);
bool found = stat_cache.end() != stat_cache.find(key);
bool do_truncate = stat_cache.size() > CacheSize;
pthread_mutex_unlock(&StatCache::stat_cache_lock);
bool found;
bool do_truncate;
{
AutoLock lock(&StatCache::stat_cache_lock);
found = stat_cache.end() != stat_cache.find(key);
do_truncate = stat_cache.size() > CacheSize;
}
if(found){
DelStat(key.c_str());
@ -402,23 +426,27 @@ bool StatCache::AddStat(std::string& key, headers_t& meta, bool forcedir, bool n
}
// add
pthread_mutex_lock(&StatCache::stat_cache_lock);
AutoLock lock(&StatCache::stat_cache_lock);
stat_cache_t::iterator iter = stat_cache.find(key); // recheck for same key exists
if(stat_cache.end() != iter){
if(iter->second){
delete iter->second;
}
delete iter->second;
stat_cache.erase(iter);
}
stat_cache[key] = ent;
pthread_mutex_unlock(&StatCache::stat_cache_lock);
// check symbolic link cache
if(!S_ISLNK(ent->stbuf.st_mode)){
if(symlink_cache.end() != symlink_cache.find(key)){
// if symbolic link cache has key, thus remove it.
DelSymlink(key.c_str(), true);
}
}
return true;
}
bool StatCache::AddNoObjectCache(string& key)
bool StatCache::AddNoObjectCache(const string& key)
{
if(!IsCacheNoObject){
return true; // pretend successful
@ -428,12 +456,13 @@ bool StatCache::AddNoObjectCache(string& key)
}
S3FS_PRN_INFO3("add no object cache entry[path=%s]", key.c_str());
pthread_mutex_lock(&StatCache::stat_cache_lock);
bool found = stat_cache.end() != stat_cache.find(key);
bool do_truncate = stat_cache.size() > CacheSize;
pthread_mutex_unlock(&StatCache::stat_cache_lock);
bool found;
bool do_truncate;
{
AutoLock lock(&StatCache::stat_cache_lock);
found = stat_cache.end() != stat_cache.find(key);
do_truncate = stat_cache.size() > CacheSize;
}
if(found){
DelStat(key.c_str());
@ -456,26 +485,27 @@ bool StatCache::AddNoObjectCache(string& key)
SetStatCacheTime(ent->cache_date); // Set time.
// add
pthread_mutex_lock(&StatCache::stat_cache_lock);
AutoLock lock(&StatCache::stat_cache_lock);
stat_cache_t::iterator iter = stat_cache.find(key); // recheck for same key exists
if(stat_cache.end() != iter){
if(iter->second){
delete iter->second;
}
delete iter->second;
stat_cache.erase(iter);
}
stat_cache[key] = ent;
pthread_mutex_unlock(&StatCache::stat_cache_lock);
// check symbolic link cache
if(symlink_cache.end() != symlink_cache.find(key)){
// if the symbolic link cache has this key, remove it.
DelSymlink(key.c_str(), true);
}
return true;
}
void StatCache::ChangeNoTruncateFlag(std::string key, bool no_truncate)
void StatCache::ChangeNoTruncateFlag(const std::string& key, bool no_truncate)
{
pthread_mutex_lock(&StatCache::stat_cache_lock);
AutoLock lock(&StatCache::stat_cache_lock);
stat_cache_t::iterator iter = stat_cache.find(key);
if(stat_cache.end() != iter){
@ -490,25 +520,22 @@ void StatCache::ChangeNoTruncateFlag(std::string key, bool no_truncate)
}
}
}
pthread_mutex_unlock(&StatCache::stat_cache_lock);
}
bool StatCache::TruncateCache(void)
bool StatCache::TruncateCache()
{
AutoLock lock(&StatCache::stat_cache_lock);
if(stat_cache.empty()){
return true;
}
pthread_mutex_lock(&StatCache::stat_cache_lock);
// 1) erase over expire time
if(IsExpireTime){
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ){
stat_cache_entry* entry = iter->second;
if(!entry || (0L == entry->notruncate && IsExpireStatCacheTime(entry->cache_date, ExpireTime))){
if(entry){
delete entry;
}
delete entry;
stat_cache.erase(iter++);
}else{
++iter;
@ -518,59 +545,55 @@ bool StatCache::TruncateCache(void)
// 2) check stat cache count
if(stat_cache.size() < CacheSize){
pthread_mutex_unlock(&StatCache::stat_cache_lock);
return true;
}
// 3) erase from the old cache in order
size_t erase_count = stat_cache.size() - CacheSize + 1;
statiterlist_t erase_iters;
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ++iter){
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end() && 0 < erase_count; ++iter){
// check no truncate
stat_cache_entry* ent = iter->second;
if(ent && 0L < ent->notruncate){
// skip for no truncate entry
// skip this no-truncate entry and keep an extra erase count for it.
if(0 < erase_count){
--erase_count; // decrement
}
}else{
// iter does not have the notruncate flag
erase_iters.push_back(iter);
}
// iter does not have the notruncate flag
erase_iters.push_back(iter);
sort(erase_iters.begin(), erase_iters.end(), sort_statiterlist());
if(erase_count < erase_iters.size()){
erase_iters.pop_back();
sort(erase_iters.begin(), erase_iters.end(), sort_statiterlist());
while(erase_count < erase_iters.size()){
erase_iters.pop_back();
}
}
}
for(statiterlist_t::iterator iiter = erase_iters.begin(); iiter != erase_iters.end(); ++iiter){
stat_cache_t::iterator siter = *iiter;
S3FS_PRN_DBG("truncate stat cache[path=%s]", siter->first.c_str());
if(siter->second){
delete siter->second;
}
delete siter->second;
stat_cache.erase(siter);
}
S3FS_MALLOCTRIM(0);
pthread_mutex_unlock(&StatCache::stat_cache_lock);
return true;
}
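Step 3 of TruncateCache above maintains a date-sorted candidate list that is trimmed back to the remaining eviction budget on every iteration, while notruncate entries consume budget instead of becoming candidates. A reduced sketch of that selection, with hypothetical types standing in for stat_cache_t and sort_statiterlist:

#include <algorithm>
#include <map>
#include <string>
#include <vector>

struct Entry { long date; long notruncate; };      // illustrative stand-in
typedef std::map<std::string, Entry> Cache;

struct by_date {                                   // plays the role of sort_statiterlist
    bool operator()(Cache::iterator a, Cache::iterator b) const {
        return a->second.date < b->second.date;    // oldest first
    }
};

static void truncate_oldest(Cache& cache, size_t max_size)
{
    if(cache.size() < max_size){
        return;
    }
    size_t erase_count = cache.size() - max_size + 1;
    std::vector<Cache::iterator> victims;
    for(Cache::iterator it = cache.begin(); it != cache.end() && 0 < erase_count; ++it){
        if(0 < it->second.notruncate){
            --erase_count;                         // protected entry consumes one slot
        }else{
            victims.push_back(it);
            std::sort(victims.begin(), victims.end(), by_date());
            while(erase_count < victims.size()){
                victims.pop_back();                // drop the newest candidates
            }
        }
    }
    for(std::vector<Cache::iterator>::iterator v = victims.begin(); v != victims.end(); ++v){
        cache.erase(*v);
    }
}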
bool StatCache::DelStat(const char* key)
bool StatCache::DelStat(const char* key, bool lock_already_held)
{
if(!key){
return false;
}
S3FS_PRN_INFO3("delete stat cache entry[path=%s]", key);
pthread_mutex_lock(&StatCache::stat_cache_lock);
AutoLock lock(&StatCache::stat_cache_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE);
stat_cache_t::iterator iter;
if(stat_cache.end() != (iter = stat_cache.find(string(key)))){
if((*iter).second){
delete (*iter).second;
}
delete (*iter).second;
stat_cache.erase(iter);
}
if(0 < strlen(key) && 0 != strcmp(key, "/")){
@ -582,16 +605,157 @@ bool StatCache::DelStat(const char* key)
// If there is "path/" cache, delete it.
strpath += "/";
}
if(stat_cache.end() != (iter = stat_cache.find(strpath.c_str()))){
if((*iter).second){
delete (*iter).second;
}
if(stat_cache.end() != (iter = stat_cache.find(strpath))){
delete (*iter).second;
stat_cache.erase(iter);
}
}
S3FS_MALLOCTRIM(0);
pthread_mutex_unlock(&StatCache::stat_cache_lock);
return true;
}
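DelStat is now reachable both with and without stat_cache_lock held, which is what the lock_already_held argument expresses. The real AutoLock class lives elsewhere in the tree; the following is only a hedged sketch of the RAII shape implied by these call sites:

#include <pthread.h>

class AutoLockSketch
{
    public:
        enum Type { NONE, ALREADY_LOCKED };

        AutoLockSketch(pthread_mutex_t* mutex, Type type = NONE)
            : lock(mutex), is_lock_acquired(ALREADY_LOCKED != type)
        {
            if(is_lock_acquired){
                pthread_mutex_lock(lock);          // take the lock only if the
            }                                      // caller does not hold it yet
        }
        ~AutoLockSketch()
        {
            if(is_lock_acquired){
                pthread_mutex_unlock(lock);        // release only what we took
            }
        }

    private:
        pthread_mutex_t* lock;
        bool is_lock_acquired;
};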
bool StatCache::GetSymlink(const string& key, string& value)
{
bool is_delete_cache = false;
const string& strpath = key;
AutoLock lock(&StatCache::stat_cache_lock);
symlink_cache_t::iterator iter = symlink_cache.find(strpath);
if(iter != symlink_cache.end() && iter->second){
symlink_cache_entry* ent = iter->second;
if(!IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){ // use the same as Stats
// found
S3FS_PRN_DBG("symbolic link cache hit [path=%s][time=%lld.%09ld][hit count=%lu]",
strpath.c_str(), static_cast<long long>(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count);
value = ent->link;
ent->hit_count++;
if(IsExpireIntervalType){
SetStatCacheTime(ent->cache_date);
}
return true;
}else{
// timeout
is_delete_cache = true;
}
}
if(is_delete_cache){
DelSymlink(strpath.c_str(), /*lock_already_held=*/ true);
}
return false;
}
bool StatCache::AddSymlink(const string& key, const string& value)
{
if(CacheSize < 1){
return true;
}
S3FS_PRN_INFO3("add symbolic link cache entry[path=%s, value=%s]", key.c_str(), value.c_str());
bool found;
bool do_truncate;
{
AutoLock lock(&StatCache::stat_cache_lock);
found = symlink_cache.end() != symlink_cache.find(key);
do_truncate = symlink_cache.size() > CacheSize;
}
if(found){
DelSymlink(key.c_str());
}else{
if(do_truncate){
if(!TruncateSymlink()){
return false;
}
}
}
// make new
symlink_cache_entry* ent = new symlink_cache_entry();
ent->link = value;
ent->hit_count = 0;
SetStatCacheTime(ent->cache_date); // Set time(use the same as Stats).
// add
AutoLock lock(&StatCache::stat_cache_lock);
symlink_cache_t::iterator iter = symlink_cache.find(key); // recheck for same key exists
if(symlink_cache.end() != iter){
delete iter->second;
symlink_cache.erase(iter);
}
symlink_cache[key] = ent;
return true;
}
bool StatCache::TruncateSymlink()
{
AutoLock lock(&StatCache::stat_cache_lock);
if(symlink_cache.empty()){
return true;
}
// 1) erase over expire time
if(IsExpireTime){
for(symlink_cache_t::iterator iter = symlink_cache.begin(); iter != symlink_cache.end(); ){
symlink_cache_entry* entry = iter->second;
if(!entry || IsExpireStatCacheTime(entry->cache_date, ExpireTime)){ // use the same as Stats
delete entry;
symlink_cache.erase(iter++);
}else{
++iter;
}
}
}
// 2) check symbolic link cache count
if(symlink_cache.size() < CacheSize){
return true;
}
// 3) erase from the old cache in order
size_t erase_count = symlink_cache.size() - CacheSize + 1;
symlinkiterlist_t erase_iters;
for(symlink_cache_t::iterator iter = symlink_cache.begin(); iter != symlink_cache.end(); ++iter){
erase_iters.push_back(iter);
sort(erase_iters.begin(), erase_iters.end(), sort_symlinkiterlist());
if(erase_count < erase_iters.size()){
erase_iters.pop_back();
}
}
for(symlinkiterlist_t::iterator iiter = erase_iters.begin(); iiter != erase_iters.end(); ++iiter){
symlink_cache_t::iterator siter = *iiter;
S3FS_PRN_DBG("truncate symbolic link cache[path=%s]", siter->first.c_str());
delete siter->second;
symlink_cache.erase(siter);
}
S3FS_MALLOCTRIM(0);
return true;
}
bool StatCache::DelSymlink(const char* key, bool lock_already_held)
{
if(!key){
return false;
}
S3FS_PRN_INFO3("delete symbolic link cache entry[path=%s]", key);
AutoLock lock(&StatCache::stat_cache_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE);
symlink_cache_t::iterator iter;
if(symlink_cache.end() != (iter = symlink_cache.find(string(key)))){
delete iter->second;
symlink_cache.erase(iter);
}
S3FS_MALLOCTRIM(0);
return true;
}
@ -620,6 +784,9 @@ bool convert_header_to_stat(const char* path, headers_t& meta, struct stat* pst,
// mtime
pst->st_mtime = get_mtime(meta);
// ctime
pst->st_ctime = get_ctime(meta);
// size
pst->st_size = get_size(meta);

View File

@ -24,7 +24,7 @@
#include "common.h"
//
// Struct
// Struct for stats cache
//
struct stat_cache_entry {
struct stat stbuf;
@ -45,29 +45,57 @@ struct stat_cache_entry {
typedef std::map<std::string, stat_cache_entry*> stat_cache_t; // key=path
//
// Struct for symbolic link cache
//
struct symlink_cache_entry {
std::string link;
unsigned long hit_count;
struct timespec cache_date; // functions operating on timespec use the same helpers as the Stats cache
symlink_cache_entry() : link(""), hit_count(0) {
cache_date.tv_sec = 0;
cache_date.tv_nsec = 0;
}
};
typedef std::map<std::string, symlink_cache_entry*> symlink_cache_t;
//
// Class
//
// [NOTE] About Symbolic link cache
// The Stats cache class now also has a symbolic link cache.
// The symbolic link cache could be separated into its own class, but
// cache eviction and related bookkeeping must stay synchronized with the
// Stats cache, so it is implemented in this class (see the usage sketch
// after this class).
// Symbolic link cache size and timeout use the same settings as Stats
// cache. This simplifies user configuration, and from a user perspective,
// the symbolic link cache appears to be included in the Stats cache.
//
class StatCache
{
private:
static StatCache singleton;
static pthread_mutex_t stat_cache_lock;
stat_cache_t stat_cache;
bool IsExpireTime;
bool IsExpireIntervalType; // if this flag is true, cache data is updated at last access time.
time_t ExpireTime;
unsigned long CacheSize;
bool IsCacheNoObject;
stat_cache_t stat_cache;
bool IsExpireTime;
bool IsExpireIntervalType; // if this flag is true, cache data is updated at last access time.
time_t ExpireTime;
unsigned long CacheSize;
bool IsCacheNoObject;
symlink_cache_t symlink_cache;
private:
StatCache();
~StatCache();
void Clear(void);
bool GetStat(std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce);
bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce);
// Truncate stat cache
bool TruncateCache(void);
// Truncate symbolic link cache
bool TruncateSymlink(void);
public:
// Reference singleton
@ -93,37 +121,42 @@ class StatCache
}
// Get stat cache
bool GetStat(std::string& key, struct stat* pst, headers_t* meta, bool overcheck = true, bool* pisforce = NULL) {
bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck = true, bool* pisforce = NULL) {
return GetStat(key, pst, meta, overcheck, NULL, pisforce);
}
bool GetStat(std::string& key, struct stat* pst, bool overcheck = true) {
bool GetStat(const std::string& key, struct stat* pst, bool overcheck = true) {
return GetStat(key, pst, NULL, overcheck, NULL, NULL);
}
bool GetStat(std::string& key, headers_t* meta, bool overcheck = true) {
bool GetStat(const std::string& key, headers_t* meta, bool overcheck = true) {
return GetStat(key, NULL, meta, overcheck, NULL, NULL);
}
bool HasStat(std::string& key, bool overcheck = true) {
bool HasStat(const std::string& key, bool overcheck = true) {
return GetStat(key, NULL, NULL, overcheck, NULL, NULL);
}
bool HasStat(std::string& key, const char* etag, bool overcheck = true) {
bool HasStat(const std::string& key, const char* etag, bool overcheck = true) {
return GetStat(key, NULL, NULL, overcheck, etag, NULL);
}
// Cache For no object
bool IsNoObjectCache(std::string& key, bool overcheck = true);
bool AddNoObjectCache(std::string& key);
bool IsNoObjectCache(const std::string& key, bool overcheck = true);
bool AddNoObjectCache(const std::string& key);
// Add stat cache
bool AddStat(std::string& key, headers_t& meta, bool forcedir = false, bool no_truncate = false);
bool AddStat(const std::string& key, headers_t& meta, bool forcedir = false, bool no_truncate = false);
// Change no truncate flag
void ChangeNoTruncateFlag(std::string key, bool no_truncate);
void ChangeNoTruncateFlag(const std::string& key, bool no_truncate);
// Delete stat cache
bool DelStat(const char* key);
bool DelStat(std::string& key) {
return DelStat(key.c_str());
bool DelStat(const char* key, bool lock_already_held = false);
bool DelStat(std::string& key, bool lock_already_held = false) {
return DelStat(key.c_str(), lock_already_held);
}
// Cache for symbolic link
bool GetSymlink(const std::string& key, std::string& value);
bool AddSymlink(const std::string& key, const std::string& value);
bool DelSymlink(const char* key, bool lock_already_held = false);
};
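As the symbolic-link note above describes, callers treat the symlink cache like the stat cache: probe, fall back to the server, then populate. A hypothetical readlink path using the methods declared above, assuming the singleton accessor is named getStatCacheData and with a made-up request helper:

#include <string>

// made-up stand-in for the GET request that reads the link target
static bool fetch_symlink_target(const std::string& path, std::string& target)
{
    (void)path;
    target = "/example/target";                    // placeholder result
    return true;
}

static bool resolve_symlink(const std::string& path, std::string& target)
{
    if(StatCache::getStatCacheData()->GetSymlink(path, target)){
        return true;                               // cache hit
    }
    if(!fetch_symlink_target(path, target)){
        return false;                              // object missing or error
    }
    StatCache::getStatCacheData()->AddSymlink(path, target);
    return true;
}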
//

View File

@ -21,6 +21,7 @@
#ifndef S3FS_COMMON_H_
#define S3FS_COMMON_H_
#include <stdlib.h>
#include "../config.h"
//
@ -37,7 +38,7 @@
//
// Macro
//
#define SAFESTRPTR(strptr) (strptr ? strptr : "")
static inline const char *SAFESTRPTR(const char *strptr) { return strptr ? strptr : ""; }
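The switch from a macro to an inline function is not cosmetic: the macro evaluates its argument twice, which matters once the argument has side effects, and the function also rejects non-pointer arguments at compile time. A small self-contained illustration; next_token is a made-up side-effecting helper:

#include <cstdio>

static int calls = 0;
static const char* next_token() { ++calls; return "tok"; }    // made-up helper

#define SAFESTRPTR_OLD(strptr) (strptr ? strptr : "")
static inline const char* SAFESTRPTR_NEW(const char* strptr) { return strptr ? strptr : ""; }

int main()
{
    calls = 0;
    (void)SAFESTRPTR_OLD(next_token());            // expands the call twice
    printf("macro evaluations: %d\n", calls);      // prints 2
    calls = 0;
    (void)SAFESTRPTR_NEW(next_token());            // argument evaluated once
    printf("function evaluations: %d\n", calls);   // prints 1
    return 0;
}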
//
// Debug level
@ -75,30 +76,65 @@ enum s3fs_log_level{
#define S3FS_LOG_NEST(nest) (nest < S3FS_LOG_NEST_MAX ? s3fs_log_nest[nest] : s3fs_log_nest[S3FS_LOG_NEST_MAX - 1])
#define S3FS_LOW_LOGPRN(level, fmt, ...) \
do{ \
if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \
if(foreground){ \
fprintf(stdout, "%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), __FILE__, __func__, __LINE__, __VA_ARGS__); \
}else{ \
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s:%s(%d): " fmt "%s", __FILE__, __func__, __LINE__, __VA_ARGS__); \
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s%s:%s(%d): " fmt "%s", instance_name.c_str(), __FILE__, __func__, __LINE__, __VA_ARGS__); \
} \
}
} \
}while(0)
#define S3FS_LOW_LOGPRN2(level, nest, fmt, ...) \
do{ \
if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \
if(foreground){ \
fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), S3FS_LOG_NEST(nest), __FILE__, __func__, __LINE__, __VA_ARGS__); \
}else{ \
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s" fmt "%s", S3FS_LOG_NEST(nest), __VA_ARGS__); \
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s%s" fmt "%s", instance_name.c_str(), S3FS_LOG_NEST(nest), __VA_ARGS__); \
} \
}
} \
}while(0)
#define S3FS_LOW_CURLDBG(fmt, ...) \
do{ \
if(foreground){ \
fprintf(stdout, "[CURL DBG] " fmt "%s\n", __VA_ARGS__); \
}else{ \
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "%s" fmt "%s", instance_name.c_str(), __VA_ARGS__); \
} \
}while(0)
#define S3FS_LOW_LOGPRN_EXIT(fmt, ...) \
do{ \
if(foreground){ \
fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \
}else{ \
fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "s3fs: " fmt "%s", __VA_ARGS__); \
}
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "%ss3fs: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \
} \
}while(0)
// Special macro for init message
#define S3FS_PRN_INIT_INFO(fmt, ...) \
do{ \
if(foreground){ \
fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(S3FS_LOG_INFO), S3FS_LOG_NEST(0), __FILE__, __func__, __LINE__, __VA_ARGS__, ""); \
}else{ \
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_INFO), "%s%s" fmt "%s", instance_name.c_str(), S3FS_LOG_NEST(0), __VA_ARGS__, ""); \
} \
}while(0)
// Special macro for checking cache files
#define S3FS_LOW_CACHE(fp, fmt, ...) \
do{ \
if(foreground){ \
fprintf(fp, fmt "%s\n", __VA_ARGS__); \
}else{ \
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_INFO), "%s: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \
} \
}while(0)
// [NOTE]
// small trick for VA_ARGS
@ -113,7 +149,8 @@ enum s3fs_log_level{
#define S3FS_PRN_INFO1(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 1, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO2(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 2, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO3(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 3, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CURL(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_CRIT, 0, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CURL(fmt, ...) S3FS_LOW_CURLDBG(fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CACHE(fp, ...) S3FS_LOW_CACHE(fp, ##__VA_ARGS__, "")
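The "small trick" the note above refers to is the trailing "" passed through each wrapper: every format string ends in "%s", and appending an empty-string argument guarantees that "%s" always has something to consume, so calls with no varargs still compile. A reduced illustration of the mechanism (DEMO_PRN is hypothetical):

#include <cstdio>

// ##__VA_ARGS__ (a widely supported GNU extension) swallows the preceding
// comma when no varargs are given, and "" is appended as a final argument.
#define DEMO_PRN(fmt, ...) printf(fmt "%s\n", ##__VA_ARGS__, "")

int main()
{
    DEMO_PRN("plain message");         // printf("plain message" "%s\n", "")
    DEMO_PRN("value=%d", 42);          // printf("value=%d" "%s\n", 42, "")
    return 0;
}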
//
// Typedef
@ -138,9 +175,7 @@ typedef struct xattr_value{
explicit xattr_value(unsigned char* pval = NULL, size_t len = 0) : pvalue(pval), length(len) {}
~xattr_value()
{
if(pvalue){
free(pvalue);
}
delete[] pvalue;
}
}XATTRVAL, *PXATTRVAL;
@ -149,6 +184,7 @@ typedef std::map<std::string, PXATTRVAL> xattrs_t;
//
// Global variables
//
// TODO: namespace these
extern bool foreground;
extern bool nomultipart;
extern bool pathrequeststyle;
@ -160,6 +196,7 @@ extern std::string bucket;
extern std::string mount_prefix;
extern std::string endpoint;
extern std::string cipher_suites;
extern std::string instance_name;
extern s3fs_log_level debug_level;
extern const char* s3fs_log_nest[S3FS_LOG_NEST_MAX];

View File

@ -18,10 +18,10 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <climits>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>
#include "s3fs_auth.h"
@ -44,10 +44,10 @@ string s3fs_get_content_md5(int fd)
if(NULL == (base64 = s3fs_base64(md5hex, get_md5_digest_length()))){
return string(""); // ENOMEM
}
free(md5hex);
delete[] md5hex;
Signature = base64;
free(base64);
delete[] base64;
return Signature;
}
@ -62,7 +62,7 @@ string s3fs_md5sum(int fd, off_t start, ssize_t size)
}
std::string md5 = s3fs_hex(md5hex, digestlen);
free(md5hex);
delete[] md5hex;
return md5;
}
@ -71,7 +71,6 @@ string s3fs_sha256sum(int fd, off_t start, ssize_t size)
{
size_t digestlen = get_sha256_digest_length();
char sha256[2 * digestlen + 1];
char hexbuf[3];
unsigned char* sha256hex;
if(NULL == (sha256hex = s3fs_sha256hexsum(fd, start, size))){
@ -80,10 +79,9 @@ string s3fs_sha256sum(int fd, off_t start, ssize_t size)
memset(sha256, 0, 2 * digestlen + 1);
for(size_t pos = 0; pos < digestlen; pos++){
snprintf(hexbuf, 3, "%02x", sha256hex[pos]);
strncat(sha256, hexbuf, 2);
snprintf(sha256 + 2 * pos, 3, "%02x", sha256hex[pos]);
}
free(sha256hex);
delete[] sha256hex;
return string(sha256);
}

File diff suppressed because it is too large

View File

@ -23,10 +23,45 @@
#include <cassert>
#include "psemaphore.h"
//----------------------------------------------
// Avoid dependency on libcurl version
//----------------------------------------------
// [NOTE]
// The following symbols (enum) depend on the version of libcurl.
// CURLOPT_TCP_KEEPALIVE 7.25.0 and later
// CURLOPT_SSL_ENABLE_ALPN 7.36.0 and later
// CURLOPT_KEEP_SENDING_ON_ERROR 7.51.0 and later
//
// s3fs uses these options; when building against an older libcurl, the
// following substitute symbols are defined to avoid compile errors.
// If the libcurl linked at runtime is also old, curl_easy_setopt fails
// with CURLE_UNKNOWN_OPTION and a message is output.
//
#if defined(HAVE_CURLOPT_TCP_KEEPALIVE) && (HAVE_CURLOPT_TCP_KEEPALIVE == 1)
#define S3FS_CURLOPT_TCP_KEEPALIVE CURLOPT_TCP_KEEPALIVE
#else
#define S3FS_CURLOPT_TCP_KEEPALIVE static_cast<CURLoption>(213)
#endif
#if defined(HAVE_CURLOPT_SSL_ENABLE_ALPN) && (HAVE_CURLOPT_SSL_ENABLE_ALPN == 1)
#define S3FS_CURLOPT_SSL_ENABLE_ALPN CURLOPT_SSL_ENABLE_ALPN
#else
#define S3FS_CURLOPT_SSL_ENABLE_ALPN static_cast<CURLoption>(226)
#endif
#if defined(HAVE_CURLOPT_KEEP_SENDING_ON_ERROR) && (HAVE_CURLOPT_KEEP_SENDING_ON_ERROR == 1)
#define S3FS_CURLOPT_KEEP_SENDING_ON_ERROR CURLOPT_KEEP_SENDING_ON_ERROR
#else
#define S3FS_CURLOPT_KEEP_SENDING_ON_ERROR static_cast<CURLoption>(245)
#endif
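With these fallbacks the options always compile; the residual risk is a runtime libcurl that does not recognize the numeric value, which surfaces as CURLE_UNKNOWN_OPTION. A hedged sketch of applying one option defensively, relying on the S3FS_CURLOPT_TCP_KEEPALIVE macro defined above (this is not the s3fs code itself):

#include <cstdio>
#include <curl/curl.h>

static void enable_keepalive(CURL* hCurl)
{
    CURLcode code = curl_easy_setopt(hCurl, S3FS_CURLOPT_TCP_KEEPALIVE, 1L);
    if(CURLE_UNKNOWN_OPTION == code){
        // runtime libcurl predates 7.25.0: log and continue without keepalive
        fprintf(stderr, "libcurl too old for TCP keepalive, continuing without it\n");
    }
}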
//----------------------------------------------
// Symbols
//----------------------------------------------
#define MIN_MULTIPART_SIZE 5242880 // 5MB
static const int MIN_MULTIPART_SIZE = 5 * 1024 * 1024;
//----------------------------------------------
// class BodyData
@ -75,7 +110,7 @@ struct filepart
std::string etag; // expected etag value
int fd; // base file(temporary full file) descriptor
off_t startpos; // seek fd point for uploading
ssize_t size; // uploading size
off_t size; // uploading size
etaglist_t* etaglist; // use only parallel upload
int etagpos; // use only parallel upload
@ -126,14 +161,12 @@ class S3fsMultiCurl;
//----------------------------------------------
// class CurlHandlerPool
//----------------------------------------------
typedef std::list<CURL*> hcurllist_t;
class CurlHandlerPool
{
public:
explicit CurlHandlerPool(int maxHandlers)
: mMaxHandlers(maxHandlers)
, mHandlers(NULL)
, mIndex(-1)
explicit CurlHandlerPool(int maxHandlers) : mMaxHandlers(maxHandlers)
{
assert(maxHandlers > 0);
}
@ -141,20 +174,25 @@ public:
bool Init();
bool Destroy();
CURL* GetHandler();
void ReturnHandler(CURL* h);
CURL* GetHandler(bool only_pool);
void ReturnHandler(CURL* hCurl, bool restore_pool);
private:
int mMaxHandlers;
int mMaxHandlers;
pthread_mutex_t mLock;
CURL** mHandlers;
int mIndex;
hcurllist_t mPool;
};
//----------------------------------------------
// class S3fsCurl
//----------------------------------------------
#include "fdcache.h" // for fdpage_list_t
class S3fsCurl;
// Prototype function for lazy setup options for curl handle
typedef bool (*s3fscurl_lazy_setup)(S3fsCurl* s3fscurl);
typedef std::map<std::string, std::string> iamcredmap_t;
typedef std::map<std::string, std::string> sseckeymap_t;
typedef std::list<sseckeymap_t> sseckeylist_t;
@ -163,7 +201,22 @@ typedef std::list<sseckeymap_t> sseckeylist_t;
enum storage_class_t {
STANDARD,
STANDARD_IA,
REDUCED_REDUNDANCY
ONEZONE_IA,
REDUCED_REDUNDANCY,
INTELLIGENT_TIERING,
GLACIER
};
enum acl_t {
PRIVATE,
PUBLIC_READ,
PUBLIC_READ_WRITE,
AWS_EXEC_READ,
AUTHENTICATED_READ,
BUCKET_OWNER_READ,
BUCKET_OWNER_FULL_CONTROL,
LOG_DELIVERY_WRITE,
INVALID_ACL
};
// sse type
@ -175,9 +228,11 @@ enum sse_type_t {
};
// share
#define SHARE_MUTEX_DNS 0
#define SHARE_MUTEX_SSL_SESSION 1
#define SHARE_MUTEX_MAX 2
enum {
SHARE_MUTEX_DNS = 0,
SHARE_MUTEX_SSL_SESSION = 1,
SHARE_MUTEX_MAX = 2,
};
// Class for wrapping curl
//
@ -219,27 +274,38 @@ class S3fsCurl
static time_t readwrite_timeout;
static int retries;
static bool is_public_bucket;
static std::string default_acl; // TODO: to enum
static acl_t default_acl;
static storage_class_t storage_class;
static sseckeylist_t sseckeys;
static std::string ssekmsid;
static sse_type_t ssetype;
static bool is_content_md5;
static bool is_verbose;
static bool is_dump_body;
static std::string AWSAccessKeyId;
static std::string AWSSecretAccessKey;
static std::string AWSAccessToken;
static time_t AWSAccessTokenExpire;
static bool is_ecs;
static bool is_use_session_token;
static bool is_ibm_iam_auth;
static std::string IAM_cred_url;
static size_t IAM_field_count;
static std::string IAM_token_field;
static std::string IAM_expiry_field;
static std::string IAM_role;
static long ssl_verify_hostname;
static curltime_t curl_times;
static curlprogress_t curl_progress;
static std::string curl_ca_bundle;
static mimes_t mimeTypes;
static std::string userAgent;
static int max_parallel_cnt;
static int max_multireq;
static off_t multipart_size;
static bool is_sigv4;
static bool is_ua; // User-Agent
static bool requester_pays;
// variables
CURL* hCurl;
@ -250,9 +316,9 @@ class S3fsCurl
std::string url; // target object path(url)
struct curl_slist* requestHeaders;
headers_t responseHeaders; // header data by HeaderCallback
BodyData* bodydata; // body data by WriteMemoryCallback
BodyData* headdata; // header data by WriteMemoryCallback
long LastResponseCode;
BodyData bodydata; // body data by WriteMemoryCallback
BodyData headdata; // header data by WriteMemoryCallback
volatile long LastResponseCode;
const unsigned char* postdata; // use by post method and read callback function.
int postdata_remaining; // use by post method and read callback function.
filepart partdata; // use by multipart upload/get object callback
@ -266,6 +332,14 @@ class S3fsCurl
int b_ssekey_pos; // backup for retrying
std::string b_ssevalue; // backup for retrying
sse_type_t b_ssetype; // backup for retrying
std::string b_from; // backup for retrying(for copy request)
headers_t b_meta; // backup for retrying(for copy request)
std::string op; // the HTTP verb of the request ("PUT", "GET", etc.)
std::string query_string; // request query string
Semaphore *sem;
pthread_mutex_t *completed_tids_lock;
std::vector<pthread_t> *completed_tids;
s3fscurl_lazy_setup fpLazySetup; // curl options for lazy setting function
public:
// constructor/destructor
@ -284,7 +358,6 @@ class S3fsCurl
static bool DestroyCryptMutex(void);
static int CurlProgress(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow);
static bool InitMimeType(const char* MimeFile = NULL);
static bool LocateBundle(void);
static size_t HeaderCallback(void *data, size_t blockSize, size_t numBlocks, void *userPtr);
static size_t WriteMemoryCallback(void *ptr, size_t blockSize, size_t numBlocks, void *data);
@ -293,9 +366,19 @@ class S3fsCurl
static size_t DownloadWriteCallback(void* ptr, size_t size, size_t nmemb, void* userp);
static bool UploadMultipartPostCallback(S3fsCurl* s3fscurl);
static bool CopyMultipartPostCallback(S3fsCurl* s3fscurl);
static bool MixMultipartPostCallback(S3fsCurl* s3fscurl);
static S3fsCurl* UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl);
static S3fsCurl* CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl);
static S3fsCurl* MixMultipartPostRetryCallback(S3fsCurl* s3fscurl);
static S3fsCurl* ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl);
// lazy functions for set curl options
static bool UploadMultipartPostSetCurlOpts(S3fsCurl* s3fscurl);
static bool CopyMultipartPostSetCurlOpts(S3fsCurl* s3fscurl);
static bool PreGetObjectRequestSetCurlOpts(S3fsCurl* s3fscurl);
static bool PreHeadRequestSetCurlOpts(S3fsCurl* s3fscurl);
static bool ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval);
static bool SetIAMCredentials(const char* response);
static bool ParseIAMRoleFromMetaDataResponse(const char* response, std::string& rolename);
@ -306,26 +389,35 @@ class S3fsCurl
static bool AddUserAgent(CURL* hCurl);
static int CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
static int CurlDebugBodyInFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
static int CurlDebugBodyOutFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
static int RawCurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr, curl_infotype datatype);
// methods
bool ResetHandle(void);
bool RemakeHandle(void);
bool ClearInternalData(void);
void insertV4Headers(const std::string &op, const std::string &path, const std::string &query_string, const std::string &payload_hash);
void insertV4Headers();
void insertV2Headers();
void insertIBMIAMHeaders();
void insertAuthHeaders();
std::string CalcSignatureV2(const std::string& method, const std::string& strMD5, const std::string& content_type, const std::string& date, const std::string& resource);
std::string CalcSignature(const std::string& method, const std::string& canonical_uri, const std::string& query_string, const std::string& strdate, const std::string& payload_hash, const std::string& date8601);
bool GetUploadId(std::string& upload_id);
int GetIAMCredentials(void);
int UploadMultipartPostSetup(const char* tpath, int part_num, const std::string& upload_id);
int CopyMultipartPostRequest(const char* from, const char* to, int part_num, std::string& upload_id, headers_t& meta);
int CopyMultipartPostSetup(const char* from, const char* to, int part_num, const std::string& upload_id, headers_t& meta);
bool UploadMultipartPostComplete();
bool CopyMultipartPostComplete();
bool MixMultipartPostComplete();
public:
// class methods
static bool InitS3fsCurl(const char* MimeFile = NULL);
static bool InitS3fsCurl(void);
static bool InitMimeType(const std::string& strFile);
static bool DestroyS3fsCurl(void);
static int ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, int fd);
static int ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const fdpage_list_t& mixuppages);
static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size);
static bool CheckIAMCredentialUpdate(void);
@ -340,7 +432,8 @@ class S3fsCurl
static int SetRetries(int count);
static bool SetPublicBucket(bool flag);
static bool IsPublicBucket(void) { return S3fsCurl::is_public_bucket; }
static std::string SetDefaultAcl(const char* acl);
static acl_t SetDefaultAcl(acl_t acl);
static acl_t GetDefaultAcl();
static storage_class_t SetStorageClass(storage_class_t storage_class);
static storage_class_t GetStorageClass() { return S3fsCurl::storage_class; }
static bool LoadEnvSse(void) { return (S3fsCurl::LoadEnvSseCKeys() && S3fsCurl::LoadEnvSseKmsid()); }
@ -361,14 +454,30 @@ class S3fsCurl
static bool SetContentMd5(bool flag);
static bool SetVerbose(bool flag);
static bool GetVerbose(void) { return S3fsCurl::is_verbose; }
static bool SetDumpBody(bool flag);
static bool IsDumpBody(void) { return S3fsCurl::is_dump_body; }
static bool SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey);
static bool IsSetAccessKeyId(void){
return (0 < S3fsCurl::IAM_role.size() || (0 < S3fsCurl::AWSAccessKeyId.size() && 0 < S3fsCurl::AWSSecretAccessKey.size()));
static bool SetAccessKeyWithSessionToken(const char* AccessKeyId, const char* SecretAccessKey, const char * SessionToken);
static bool IsSetAccessKeyID(void){
return (0 < S3fsCurl::AWSAccessKeyId.size());
}
static bool IsSetAccessKeys(void){
return (0 < S3fsCurl::IAM_role.size() || ((0 < S3fsCurl::AWSAccessKeyId.size() || S3fsCurl::is_ibm_iam_auth) && 0 < S3fsCurl::AWSSecretAccessKey.size()));
}
static long SetSslVerifyHostname(long value);
static long GetSslVerifyHostname(void) { return S3fsCurl::ssl_verify_hostname; }
// maximum parallel GET and PUT requests
static int SetMaxParallelCount(int value);
static int GetMaxParallelCount(void) { return S3fsCurl::max_parallel_cnt; }
// maximum parallel HEAD requests
static int SetMaxMultiRequest(int max);
static int GetMaxMultiRequest(void) { return S3fsCurl::max_multireq; }
static bool SetIsECS(bool flag);
static bool SetIsIBMIAMAuth(bool flag);
static size_t SetIAMFieldCount(size_t field_count);
static std::string SetIAMCredentialsURL(const char* url);
static std::string SetIAMTokenField(const char* token_field);
static std::string SetIAMExpiryField(const char* expiry_field);
static std::string SetIAMRole(const char* role);
static const char* GetIAMRole(void) { return S3fsCurl::IAM_role.c_str(); }
static bool SetMultipartSize(off_t size);
@ -377,15 +486,18 @@ class S3fsCurl
static bool IsSignatureV4(void) { return S3fsCurl::is_sigv4; }
static bool SetUserAgentFlag(bool isset) { bool bresult = S3fsCurl::is_ua; S3fsCurl::is_ua = isset; return bresult; }
static bool IsUserAgentFlag(void) { return S3fsCurl::is_ua; }
static void InitUserAgent(void);
static bool SetRequesterPays(bool flag) { bool old_flag = S3fsCurl::requester_pays; S3fsCurl::requester_pays = flag; return old_flag; }
static bool IsRequesterPays(void) { return S3fsCurl::requester_pays; }
// methods
bool CreateCurlHandle(bool force = false);
bool DestroyCurlHandle(void);
bool CreateCurlHandle(bool only_pool = false, bool remake = false);
bool DestroyCurlHandle(bool restore_pool = true, bool clear_internal_data = true);
bool LoadIAMRoleFromMetaData(void);
bool AddSseRequestHead(sse_type_t ssetype, std::string& ssevalue, bool is_only_c, bool is_copy);
bool GetResponseCode(long& responseCode);
int RequestPerform(void);
bool GetResponseCode(long& responseCode, bool from_curl_handle = true);
int RequestPerform(bool dontAddAuthHeaders=false);
int DeleteRequest(const char* tpath);
bool PreHeadRequest(const char* tpath, const char* bpath = NULL, const char* savedpath = NULL, int ssekey_pos = -1);
bool PreHeadRequest(std::string& tpath, std::string& bpath, std::string& savedpath, int ssekey_pos = -1) {
@ -399,13 +511,13 @@ class S3fsCurl
int CheckBucket(void);
int ListBucketRequest(const char* tpath, const char* query);
int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy);
int CompleteMultipartPostRequest(const char* tpath, std::string& upload_id, etaglist_t& parts);
int CompleteMultipartPostRequest(const char* tpath, const std::string& upload_id, etaglist_t& parts);
int UploadMultipartPostRequest(const char* tpath, int part_num, const std::string& upload_id);
int MultipartListRequest(std::string& body);
int AbortMultipartUpload(const char* tpath, std::string& upload_id);
int AbortMultipartUpload(const char* tpath, const std::string& upload_id);
int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy);
int MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool is_copy);
int MultipartUploadRequest(const std::string& upload_id, const char* tpath, int fd, off_t offset, size_t size, etaglist_t& list);
int MultipartUploadRequest(const std::string& upload_id, const char* tpath, int fd, off_t offset, off_t size, etaglist_t& list);
int MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size);
// methods(variables)
@ -414,9 +526,10 @@ class S3fsCurl
std::string GetBasePath(void) const { return base_path; }
std::string GetSpacialSavedPath(void) const { return saved_path; }
std::string GetUrl(void) const { return url; }
std::string GetOp(void) const { return op; }
headers_t* GetResponseHeaders(void) { return &responseHeaders; }
BodyData* GetBodyData(void) const { return bodydata; }
BodyData* GetHeadData(void) const { return headdata; }
BodyData* GetBodyData(void) { return &bodydata; }
BodyData* GetHeadData(void) { return &headdata; }
long GetLastResponseCode(void) const { return LastResponseCode; }
bool SetUseAhbe(bool ahbe);
bool EnableUseAhbe(void) { return SetUseAhbe(true); }
@ -433,21 +546,24 @@ class S3fsCurl
//----------------------------------------------
// Class for wrapping multi curl
//
typedef std::map<CURL*, S3fsCurl*> s3fscurlmap_t;
typedef std::vector<S3fsCurl*> s3fscurllist_t;
typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl); // callback for succeed multi request
typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying
class S3fsMultiCurl
{
private:
static int max_multireq;
const int maxParallelism;
s3fscurlmap_t cMap_all; // all of curl requests
s3fscurlmap_t cMap_req; // curl requests are sent
s3fscurllist_t clist_all; // all of curl requests
s3fscurllist_t clist_req; // curl requests are sent
S3fsMultiSuccessCallback SuccessCallback;
S3fsMultiRetryCallback RetryCallback;
pthread_mutex_t completed_tids_lock;
std::vector<pthread_t> completed_tids;
private:
bool ClearEx(bool is_all);
int MultiPerform(void);
@ -456,11 +572,10 @@ class S3fsMultiCurl
static void* RequestPerformWrapper(void* arg);
public:
S3fsMultiCurl();
explicit S3fsMultiCurl(int maxParallelism);
~S3fsMultiCurl();
static int SetMaxMultiRequest(int max);
static int GetMaxMultiRequest(void) { return S3fsMultiCurl::max_multireq; }
int GetMaxParallelism() { return maxParallelism; }
S3fsMultiSuccessCallback SetSuccessCallback(S3fsMultiSuccessCallback function);
S3fsMultiRetryCallback SetRetryCallback(S3fsMultiRetryCallback function);
@ -479,9 +594,12 @@ struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* d
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value);
std::string get_sorted_header_keys(const struct curl_slist* list);
std::string get_canonical_headers(const struct curl_slist* list, bool only_amz = false);
std::string get_header_value(const struct curl_slist* list, const std::string &key);
bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url);
std::string prepare_url(const char* url);
bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& ssevalue); // implement in s3fs.cpp
const char *acl_to_string(acl_t acl);
acl_t string_to_acl(const char *acl);
#endif // S3FS_CURL_H_

File diff suppressed because it is too large

View File

@ -21,7 +21,6 @@
#define FD_CACHE_H_
#include <sys/statvfs.h>
#include "curl.h"
//------------------------------------------------
// CacheFileStat
@ -35,15 +34,20 @@ class CacheFileStat
private:
static bool MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir = true);
bool RawOpen(bool readonly);
public:
static std::string GetCacheFileStatTopDir(void);
static bool DeleteCacheFileStat(const char* path);
static bool CheckCacheFileStatTopDir(void);
static bool DeleteCacheFileStatDirectory(void);
static bool RenameCacheFileStat(const char* oldpath, const char* newpath);
explicit CacheFileStat(const char* tpath = NULL);
~CacheFileStat();
bool Open(void);
bool ReadOnlyOpen(void);
bool Release(void);
bool SetPath(const char* tpath, bool is_open = true);
int GetFd(void) const { return fd; }
@ -56,22 +60,24 @@ class CacheFileStat
struct fdpage
{
off_t offset;
size_t bytes;
off_t bytes;
bool loaded;
bool modified;
fdpage(off_t start = 0, size_t size = 0, bool is_loaded = false)
: offset(start), bytes(size), loaded(is_loaded) {}
fdpage(off_t start = 0, off_t size = 0, bool is_loaded = false, bool is_modified = false)
: offset(start), bytes(size), loaded(is_loaded), modified(is_modified) {}
off_t next(void) const { return (offset + bytes); }
off_t end(void) const { return (0 < bytes ? offset + bytes - 1 : 0); }
};
typedef std::list<struct fdpage*> fdpage_list_t;
typedef std::list<struct fdpage> fdpage_list_t;
class FdEntity;
//
// Management of loading area/modifying
//
// cppcheck-suppress copyCtorAndEqOperator
class PageList
{
friend class FdEntity; // only one method access directly pages.
@ -79,29 +85,47 @@ class PageList
private:
fdpage_list_t pages;
public:
enum page_status{
PAGE_NOT_LOAD_MODIFIED = 0,
PAGE_LOADED,
PAGE_MODIFIED,
PAGE_LOAD_MODIFIED
};
private:
static bool GetSparseFilePages(int fd, size_t file_size, fdpage_list_t& sparse_list);
static bool CheckZeroAreaInFile(int fd, off_t start, size_t bytes);
static bool CheckAreaInSparseFile(const struct fdpage& checkpage, const fdpage_list_t& sparse_list, int fd, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list);
void Clear(void);
bool Compress(void);
bool Compress();
bool Parse(off_t new_pos);
public:
static void FreeList(fdpage_list_t& list);
explicit PageList(size_t size = 0, bool is_loaded = false);
explicit PageList(off_t size = 0, bool is_loaded = false, bool is_modified = false);
explicit PageList(const PageList& other);
~PageList();
bool Init(size_t size, bool is_loaded);
size_t Size(void) const;
bool Resize(size_t size, bool is_loaded);
bool Init(off_t size, bool is_loaded, bool is_modified);
off_t Size(void) const;
bool Resize(off_t size, bool is_loaded, bool is_modified);
bool IsPageLoaded(off_t start = 0, size_t size = 0) const; // size=0 is checking to end of list
bool SetPageLoadedStatus(off_t start, size_t size, bool is_loaded = true, bool is_compress = true);
bool FindUnloadedPage(off_t start, off_t& resstart, size_t& ressize) const;
size_t GetTotalUnloadedPageSize(off_t start = 0, size_t size = 0) const; // size=0 is checking to end of list
int GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, size_t size = 0) const; // size=0 is checking to end of list
bool IsPageLoaded(off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list
bool SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus = PAGE_LOADED, bool is_compress = true);
bool FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const;
off_t GetTotalUnloadedPageSize(off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list
int GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list
bool GetPageListsForMultipartUpload(fdpage_list_t& dlpages, fdpage_list_t& mixuppages, off_t max_partsize);
bool Serialize(CacheFileStat& file, bool is_output);
void Dump(void);
bool IsModified(void) const;
bool ClearAllModified(void);
bool Serialize(CacheFileStat& file, bool is_output, ino_t inode);
void Dump(void) const;
bool CompareSparseFile(int fd, size_t file_size, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list);
};
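The new modified flag and GetPageListsForMultipartUpload imply the mix-upload split: pages changed locally must be uploaded from the cache file, untouched ranges can become server-side copy parts, and every part must respect the maximum part size. A speculative sketch of that classification; the struct and names are illustrative, not the s3fs implementation:

#include <algorithm>
#include <list>
#include <sys/types.h>

struct page {                                      // illustrative fdpage stand-in
    off_t offset;
    off_t bytes;
    bool  modified;
};
typedef std::list<page> page_list_t;

static void split_for_mix_upload(const page_list_t& pages, off_t max_partsize,
                                 page_list_t& upload_parts, page_list_t& copy_parts)
{
    for(page_list_t::const_iterator it = pages.begin(); it != pages.end(); ++it){
        page_list_t& dest = it->modified ? upload_parts : copy_parts;
        for(off_t done = 0; done < it->bytes; done += max_partsize){
            page part;                             // split oversized ranges into
            part.offset   = it->offset + done;     // part-size-bounded pieces
            part.bytes    = std::min(max_partsize, it->bytes - done);
            part.modified = it->modified;
            dest.push_back(part);
        }
    }
}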
//------------------------------------------------
@ -110,61 +134,71 @@ class PageList
class FdEntity
{
private:
static bool mixmultipart; // whether multipart uploading can use copy api.
pthread_mutex_t fdent_lock;
bool is_lock_init;
PageList pagelist;
int refcnt; // reference count
std::string path; // object path
std::string cachepath; // local cache file path
// (if this is empty, does not load/save pagelist.)
std::string mirrorpath; // mirror file path to local cache file path
int fd; // file descriptor(tmp file or cache file)
FILE* pfile; // file pointer(tmp file or cache file)
bool is_modify; // if file is changed, this flag is true
ino_t inode; // inode number for cache file
headers_t orgmeta; // original headers at opening
size_t size_orgmeta; // original file size in original headers
off_t size_orgmeta; // original file size in original headers
pthread_mutex_t fdent_data_lock;// protects the following members
PageList pagelist;
std::string upload_id; // for no cached multipart uploading when no disk space
etaglist_t etaglist; // for no cached multipart uploading when no disk space
off_t mp_start; // start position for no cached multipart(write method only)
size_t mp_size; // size for no cached multipart(write method only)
off_t mp_size; // size for no cached multipart(write method only)
std::string cachepath; // local cache file path
// (if this is empty, does not load/save pagelist.)
std::string mirrorpath; // mirror file path to local cache file path
private:
static int FillFile(int fd, unsigned char byte, size_t size, off_t start);
static int FillFile(int fd, unsigned char byte, off_t size, off_t start);
static ino_t GetInode(int fd);
void Clear(void);
ino_t GetInode(void);
int OpenMirrorFile(void);
bool SetAllStatus(bool is_loaded); // [NOTE] not locking
//bool SetAllStatusLoaded(void) { return SetAllStatus(true); }
bool SetAllStatusUnloaded(void) { return SetAllStatus(false); }
public:
static bool SetNoMixMultipart(void);
explicit FdEntity(const char* tpath = NULL, const char* cpath = NULL);
~FdEntity();
void Close(void);
bool IsOpen(void) const { return (-1 != fd); }
int Open(headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1, bool no_fd_lock_wait = false);
bool OpenAndLoadAll(headers_t* pmeta = NULL, size_t* size = NULL, bool force_load = false);
int Dup(bool no_fd_lock_wait = false);
int Open(headers_t* pmeta = NULL, off_t size = -1, time_t time = -1, bool no_fd_lock_wait = false);
bool OpenAndLoadAll(headers_t* pmeta = NULL, off_t* size = NULL, bool force_load = false);
int Dup(bool lock_already_held = false);
const char* GetPath(void) const { return path.c_str(); }
void SetPath(const std::string &newpath) { path = newpath; }
bool RenamePath(const std::string& newpath, std::string& fentmapkey);
int GetFd(void) const { return fd; }
bool IsModified(void) const { return pagelist.IsModified(); }
bool GetStats(struct stat& st);
int SetMtime(time_t time);
bool GetStats(struct stat& st, bool lock_already_held = false);
int SetCtime(time_t time);
int SetMtime(time_t time, bool lock_already_held = false);
bool UpdateCtime(void);
bool UpdateMtime(void);
bool GetSize(size_t& size);
bool GetSize(off_t& size);
bool SetMode(mode_t mode);
bool SetUId(uid_t uid);
bool SetGId(gid_t gid);
bool SetContentType(const char* path);
int Load(off_t start = 0, size_t size = 0); // size=0 means loading to end
int NoCacheLoadAndPost(off_t start = 0, size_t size = 0); // size=0 means loading to end
int Load(off_t start = 0, off_t size = 0, bool lock_already_held = false, bool is_modified_flag = false); // size=0 means loading to end
int NoCacheLoadAndPost(off_t start = 0, off_t size = 0); // size=0 means loading to end
int NoCachePreMultipartPost(void);
int NoCacheMultipartPost(int tgfd, off_t start, size_t size);
int NoCacheMultipartPost(int tgfd, off_t start, off_t size);
int NoCacheCompleteMultipartPost(void);
int RowFlush(const char* tpath, bool force_sync = false);
@ -173,7 +207,7 @@ class FdEntity
ssize_t Read(char* bytes, off_t start, size_t size, bool force_load = false);
ssize_t Write(const char* bytes, off_t start, size_t size);
void CleanupCache();
bool ReserveDiskSpace(off_t size);
};
typedef std::map<std::string, class FdEntity*> fdent_map_t; // key=path, value=FdEntity*
@ -186,16 +220,19 @@ class FdManager
static FdManager singleton;
static pthread_mutex_t fd_manager_lock;
static pthread_mutex_t cache_cleanup_lock;
static pthread_mutex_t reserved_diskspace_lock;
static bool is_lock_init;
static std::string cache_dir;
static bool check_cache_dir_exist;
static size_t free_disk_space; // limit free disk space
static off_t free_disk_space; // limit free disk space
static std::string check_cache_output;
fdent_map_t fent;
private:
static fsblkcnt_t GetFreeDiskSpace(const char* path);
static off_t GetFreeDiskSpace(const char* path);
void CleanupCacheDirInternal(const std::string &path = "");
bool RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const char* sub_path, int& total_file_cnt, int& err_file_cnt, int& err_dir_cnt);
public:
FdManager();
@ -207,26 +244,32 @@ class FdManager
static bool DeleteCacheDirectory(void);
static int DeleteCacheFile(const char* path);
static bool SetCacheDir(const char* dir);
static bool IsCacheDir(void) { return (0 < FdManager::cache_dir.size()); }
static bool IsCacheDir(void) { return !FdManager::cache_dir.empty(); }
static const char* GetCacheDir(void) { return FdManager::cache_dir.c_str(); }
static bool SetCacheCheckOutput(const char* path);
static const char* GetCacheCheckOutput(void) { return FdManager::check_cache_output.c_str(); }
static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true, bool is_mirror_path = false);
static bool CheckCacheTopDir(void);
static bool MakeRandomTempPath(const char* path, std::string& tmppath);
static bool SetCheckCacheDirExist(bool is_check);
static bool CheckCacheDirExist(void);
static size_t GetEnsureFreeDiskSpace(void) { return FdManager::free_disk_space; }
static size_t SetEnsureFreeDiskSpace(size_t size);
static size_t InitEnsureFreeDiskSpace(void) { return SetEnsureFreeDiskSpace(0); }
static bool IsSafeDiskSpace(const char* path, size_t size);
static off_t GetEnsureFreeDiskSpace();
static off_t SetEnsureFreeDiskSpace(off_t size);
static bool IsSafeDiskSpace(const char* path, off_t size);
static void FreeReservedDiskSpace(off_t size);
static bool ReserveDiskSpace(off_t size);
// Return FdEntity associated with path, returning NULL on error. This operation increments the reference count; callers must decrement via Close after use.
FdEntity* GetFdEntity(const char* path, int existfd = -1);
FdEntity* Open(const char* path, headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true, bool no_fd_lock_wait = false);
FdEntity* Open(const char* path, headers_t* pmeta = NULL, off_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true, bool no_fd_lock_wait = false);
FdEntity* ExistOpen(const char* path, int existfd = -1, bool ignore_existfd = false);
void Rename(const std::string &from, const std::string &to);
bool Close(FdEntity* ent);
bool ChangeEntityToTempPath(FdEntity* ent, const char* path);
void CleanupCacheDir();
bool CheckAllCache(void);
};
#endif // FD_CACHE_H_

View File

@ -57,7 +57,7 @@ const char* s3fs_crypt_lib_name(void)
#else // USE_GNUTLS_NETTLE
const char* s3fs_crypt_lib_name(void)
const char* s3fs_crypt_lib_name()
{
static const char version[] = "GnuTLS(gcrypt)";
@ -69,15 +69,20 @@ const char* s3fs_crypt_lib_name(void)
//-------------------------------------------------------------------
// Utility Function for global init
//-------------------------------------------------------------------
bool s3fs_init_global_ssl(void)
bool s3fs_init_global_ssl()
{
if(GNUTLS_E_SUCCESS != gnutls_global_init()){
return false;
}
#ifndef USE_GNUTLS_NETTLE
if(NULL == gcry_check_version(NULL)){
return false;
}
#endif // USE_GNUTLS_NETTLE
return true;
}
bool s3fs_destroy_global_ssl(void)
bool s3fs_destroy_global_ssl()
{
gnutls_global_deinit();
return true;
@ -86,12 +91,12 @@ bool s3fs_destroy_global_ssl(void)
//-------------------------------------------------------------------
// Utility Function for crypt lock
//-------------------------------------------------------------------
bool s3fs_init_crypt_mutex(void)
bool s3fs_init_crypt_mutex()
{
return true;
}
bool s3fs_destroy_crypt_mutex(void)
bool s3fs_destroy_crypt_mutex()
{
return true;
}
@ -107,9 +112,7 @@ bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t
return false;
}
if(NULL == (*digest = (unsigned char*)malloc(SHA1_DIGEST_SIZE))){
return false;
}
*digest = new unsigned char[SHA1_DIGEST_SIZE];
struct hmac_sha1_ctx ctx_hmac;
hmac_sha1_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
@ -126,9 +129,7 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
return false;
}
if(NULL == (*digest = (unsigned char*)malloc(SHA256_DIGEST_SIZE))){
return false;
}
*digest = new unsigned char[SHA256_DIGEST_SIZE];
struct hmac_sha256_ctx ctx_hmac;
hmac_sha256_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
@ -150,11 +151,9 @@ bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){
return false;
}
if(NULL == (*digest = (unsigned char*)malloc(*digestlen + 1))){
return false;
}
*digest = new unsigned char[*digestlen + 1];
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA1, key, keylen, data, datalen, *digest)){
free(*digest);
delete[] *digest;
*digest = NULL;
return false;
}
@ -170,11 +169,9 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA256))){
return false;
}
if(NULL == (*digest = (unsigned char*)malloc(*digestlen + 1))){
return false;
}
*digest = new unsigned char[*digestlen + 1];
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA256, key, keylen, data, datalen, *digest)){
free(*digest);
delete[] *digest;
*digest = NULL;
return false;
}
@ -186,11 +183,9 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
//-------------------------------------------------------------------
// Utility Function for MD5
//-------------------------------------------------------------------
#define MD5_DIGEST_LENGTH 16
size_t get_md5_digest_length(void)
size_t get_md5_digest_length()
{
return MD5_DIGEST_LENGTH;
return 16;
}
#ifdef USE_GNUTLS_NETTLE
@ -201,17 +196,12 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
ssize_t bytes;
unsigned char* result;
// seek to top of file.
if(-1 == lseek(fd, start, SEEK_SET)){
return NULL;
}
memset(buf, 0, 512);
md5_init(&ctx_md5);
for(ssize_t total = 0; total < size; total += bytes){
bytes = 512 < (size - total) ? 512 : (size - total);
bytes = read(fd, buf, bytes);
bytes = pread(fd, buf, bytes, start + total);
if(0 == bytes){
// end of file
break;
@ -223,16 +213,9 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
md5_update(&ctx_md5, bytes, buf);
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_md5_digest_length()))){
return NULL;
}
result = new unsigned char[get_md5_digest_length()];
md5_digest(&ctx_md5, get_md5_digest_length(), result);
if(-1 == lseek(fd, start, SEEK_SET)){
free(result);
return NULL;
}
return result;
}
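Replacing lseek+read with pread, here and in the other digest variants, removes every seek-to-start and seek-back error path: pread reads at an absolute offset without touching the shared file position. A reduced sketch of the loop the digest functions now share, assuming a caller-supplied update callback:

#include <sys/types.h>
#include <unistd.h>

template <typename Update>
static bool hash_file_range(int fd, off_t start, ssize_t size, Update update)
{
    unsigned char buf[512];
    for(ssize_t total = 0, bytes = 0; total < size; total += bytes){
        bytes = 512 < (size - total) ? 512 : (size - total);
        bytes = pread(fd, buf, bytes, start + total);  // absolute-offset read,
        if(0 == bytes){                                // no lseek bookkeeping
            break;                                     // end of file
        }else if(-1 == bytes){
            return false;                              // read error
        }
        update(buf, bytes);                            // feed the digest context
    }
    return true;
}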
@ -254,11 +237,6 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
size = static_cast<ssize_t>(st.st_size);
}
// seek to top of file.
if(-1 == lseek(fd, start, SEEK_SET)){
return NULL;
}
memset(buf, 0, 512);
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){
S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
@ -267,29 +245,23 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
for(ssize_t total = 0; total < size; total += bytes){
bytes = 512 < (size - total) ? 512 : (size - total);
bytes = read(fd, buf, bytes);
bytes = pread(fd, buf, bytes, start + total);
if(0 == bytes){
// end of file
break;
}else if(-1 == bytes){
// error
S3FS_PRN_ERR("file read error(%d)", errno);
gcry_md_close(ctx_md5);
return NULL;
}
gcry_md_write(ctx_md5, buf, bytes);
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_md5_digest_length()))){
return NULL;
}
result = new unsigned char[get_md5_digest_length()];
memcpy(result, gcry_md_read(ctx_md5, 0), get_md5_digest_length());
gcry_md_close(ctx_md5);
if(-1 == lseek(fd, start, SEEK_SET)){
free(result);
return NULL;
}
return result;
}
@ -298,20 +270,16 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
//-------------------------------------------------------------------
// Utility Function for SHA256
//-------------------------------------------------------------------
#define SHA256_DIGEST_LENGTH 32
size_t get_sha256_digest_length(void)
size_t get_sha256_digest_length()
{
return SHA256_DIGEST_LENGTH;
return 32;
}
#ifdef USE_GNUTLS_NETTLE
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
{
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
return false;
}
*digest = new unsigned char[*digestlen];
struct sha256_ctx ctx_sha256;
sha256_init(&ctx_sha256);
@ -328,17 +296,12 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
ssize_t bytes;
unsigned char* result;
// seek to top of file.
if(-1 == lseek(fd, start, SEEK_SET)){
return NULL;
}
memset(buf, 0, 512);
sha256_init(&ctx_sha256);
for(ssize_t total = 0; total < size; total += bytes){
bytes = 512 < (size - total) ? 512 : (size - total);
bytes = read(fd, buf, bytes);
bytes = pread(fd, buf, bytes, start + total);
if(0 == bytes){
// end of file
break;
@ -350,16 +313,9 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
sha256_update(&ctx_sha256, bytes, buf);
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
return NULL;
}
result = new unsigned char[get_sha256_digest_length()];
sha256_digest(&ctx_sha256, get_sha256_digest_length(), result);
if(-1 == lseek(fd, start, SEEK_SET)){
free(result);
return NULL;
}
return result;
}
@ -367,16 +323,14 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
{
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
return false;
}
size_t len = (*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
*digest = new unsigned char[len];
gcry_md_hd_t ctx_sha256;
gcry_error_t err;
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
free(*digest);
delete[] *digest;
return false;
}
gcry_md_write(ctx_sha256, data, datalen);
@ -402,11 +356,6 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
size = static_cast<ssize_t>(st.st_size);
}
// seek to top of file.
if(-1 == lseek(fd, start, SEEK_SET)){
return NULL;
}
memset(buf, 0, 512);
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
@ -415,29 +364,23 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
for(ssize_t total = 0; total < size; total += bytes){
bytes = 512 < (size - total) ? 512 : (size - total);
bytes = read(fd, buf, bytes);
bytes = pread(fd, buf, bytes, start + total);
if(0 == bytes){
// end of file
break;
}else if(-1 == bytes){
// error
S3FS_PRN_ERR("file read error(%d)", errno);
gcry_md_close(ctx_sha256);
return NULL;
}
gcry_md_write(ctx_sha256, buf, bytes);
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
return NULL;
}
result = new unsigned char[get_sha256_digest_length()];
memcpy(result, gcry_md_read(ctx_sha256, 0), get_sha256_digest_length());
gcry_md_close(ctx_sha256);
if(-1 == lseek(fd, start, SEEK_SET)){
free(result);
return NULL;
}
return result;
}


@ -42,7 +42,7 @@ using namespace std;
//-------------------------------------------------------------------
// Utility Function for version
//-------------------------------------------------------------------
const char* s3fs_crypt_lib_name(void)
const char* s3fs_crypt_lib_name()
{
static const char version[] = "NSS";
@ -52,14 +52,18 @@ const char* s3fs_crypt_lib_name(void)
//-------------------------------------------------------------------
// Utility Function for global init
//-------------------------------------------------------------------
bool s3fs_init_global_ssl(void)
bool s3fs_init_global_ssl()
{
NSS_Init(NULL);
NSS_NoDB_Init(NULL);
PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);
if(SECSuccess != NSS_NoDB_Init(NULL)){
S3FS_PRN_ERR("Failed NSS_NoDB_Init call.");
return false;
}
return true;
}
bool s3fs_destroy_global_ssl(void)
bool s3fs_destroy_global_ssl()
{
NSS_Shutdown();
PL_ArenaFinish();
@ -70,12 +74,12 @@ bool s3fs_destroy_global_ssl(void)
//-------------------------------------------------------------------
// Utility Function for crypt lock
//-------------------------------------------------------------------
bool s3fs_init_crypt_mutex(void)
bool s3fs_init_crypt_mutex()
{
return true;
}
bool s3fs_destroy_crypt_mutex(void)
bool s3fs_destroy_crypt_mutex()
{
return true;
}
@ -92,7 +96,6 @@ static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* d
PK11SlotInfo* Slot;
PK11SymKey* pKey;
PK11Context* Context;
SECStatus SecStatus;
unsigned char tmpdigest[64];
SECItem KeySecItem = {siBuffer, reinterpret_cast<unsigned char*>(const_cast<void*>(key)), static_cast<unsigned int>(keylen)};
SECItem NullSecItem = {siBuffer, NULL, 0};
@ -111,9 +114,9 @@ static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* d
}
*digestlen = 0;
if(SECSuccess != (SecStatus = PK11_DigestBegin(Context)) ||
SECSuccess != (SecStatus = PK11_DigestOp(Context, data, datalen)) ||
SECSuccess != (SecStatus = PK11_DigestFinal(Context, tmpdigest, digestlen, sizeof(tmpdigest))) )
if(SECSuccess != PK11_DigestBegin(Context) ||
SECSuccess != PK11_DigestOp(Context, data, datalen) ||
SECSuccess != PK11_DigestFinal(Context, tmpdigest, digestlen, sizeof(tmpdigest)) )
{
PK11_DestroyContext(Context, PR_TRUE);
PK11_FreeSymKey(pKey);
@ -124,9 +127,7 @@ static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* d
PK11_FreeSymKey(pKey);
PK11_FreeSlot(Slot);
if(NULL == (*digest = (unsigned char*)malloc(*digestlen))){
return false;
}
*digest = new unsigned char[*digestlen];
memcpy(*digest, tmpdigest, *digestlen);
return true;
@ -145,7 +146,7 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
//-------------------------------------------------------------------
// Utility Function for MD5
//-------------------------------------------------------------------
size_t get_md5_digest_length(void)
size_t get_md5_digest_length()
{
return MD5_LENGTH;
}
@ -166,47 +167,35 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
size = static_cast<ssize_t>(st.st_size);
}
// seek to top of file.
if(-1 == lseek(fd, start, SEEK_SET)){
return NULL;
}
memset(buf, 0, 512);
md5ctx = PK11_CreateDigestContext(SEC_OID_MD5);
for(ssize_t total = 0; total < size; total += bytes){
bytes = 512 < (size - total) ? 512 : (size - total);
bytes = read(fd, buf, bytes);
bytes = pread(fd, buf, bytes, start + total);
if(0 == bytes){
// end of file
break;
}else if(-1 == bytes){
// error
S3FS_PRN_ERR("file read error(%d)", errno);
PK11_DestroyContext(md5ctx, PR_TRUE);
return NULL;
}
PK11_DigestOp(md5ctx, buf, bytes);
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_md5_digest_length()))){
PK11_DestroyContext(md5ctx, PR_TRUE);
return NULL;
}
result = new unsigned char[get_md5_digest_length()];
PK11_DigestFinal(md5ctx, result, &md5outlen, get_md5_digest_length());
PK11_DestroyContext(md5ctx, PR_TRUE);
if(-1 == lseek(fd, start, SEEK_SET)){
free(result);
return NULL;
}
return result;
}
//-------------------------------------------------------------------
// Utility Function for SHA256
//-------------------------------------------------------------------
size_t get_sha256_digest_length(void)
size_t get_sha256_digest_length()
{
return SHA256_LENGTH;
}
@ -214,9 +203,7 @@ size_t get_sha256_digest_length(void)
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
{
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
return false;
}
*digest = new unsigned char[*digestlen];
PK11Context* sha256ctx;
unsigned int sha256outlen;
@ -246,17 +233,12 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
size = static_cast<ssize_t>(st.st_size);
}
// seek to top of file.
if(-1 == lseek(fd, start, SEEK_SET)){
return NULL;
}
memset(buf, 0, 512);
sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);
for(ssize_t total = 0; total < size; total += bytes){
bytes = 512 < (size - total) ? 512 : (size - total);
bytes = read(fd, buf, bytes);
bytes = pread(fd, buf, bytes, start + total);
if(0 == bytes){
// end of file
break;
@ -269,18 +251,10 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
PK11_DigestOp(sha256ctx, buf, bytes);
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
PK11_DestroyContext(sha256ctx, PR_TRUE);
return NULL;
}
result = new unsigned char[get_sha256_digest_length()];
PK11_DigestFinal(sha256ctx, result, &sha256outlen, get_sha256_digest_length());
PK11_DestroyContext(sha256ctx, PR_TRUE);
if(-1 == lseek(fd, start, SEEK_SET)){
free(result);
return NULL;
}
return result;
}


@ -18,15 +18,15 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <cstdio>
#include <cstdlib>
#include <cerrno>
#include <pthread.h>
#include <unistd.h>
#include <syslog.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
#include <cstring>
#include <openssl/bio.h>
#include <openssl/buffer.h>
#include <openssl/evp.h>
@ -46,7 +46,7 @@ using namespace std;
//-------------------------------------------------------------------
// Utility Function for version
//-------------------------------------------------------------------
const char* s3fs_crypt_lib_name(void)
const char* s3fs_crypt_lib_name()
{
static const char version[] = "OpenSSL";
@ -56,7 +56,7 @@ const char* s3fs_crypt_lib_name(void)
//-------------------------------------------------------------------
// Utility Function for global init
//-------------------------------------------------------------------
bool s3fs_init_global_ssl(void)
bool s3fs_init_global_ssl()
{
ERR_load_crypto_strings();
ERR_load_BIO_strings();
@ -64,7 +64,7 @@ bool s3fs_init_global_ssl(void)
return true;
}
bool s3fs_destroy_global_ssl(void)
bool s3fs_destroy_global_ssl()
{
EVP_cleanup();
ERR_free_strings();
@ -82,56 +82,83 @@ struct CRYPTO_dynlock_value
static pthread_mutex_t* s3fs_crypt_mutex = NULL;
static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line) __attribute__ ((unused));
static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line)
{
if(s3fs_crypt_mutex){
int res;
if(mode & CRYPTO_LOCK){
pthread_mutex_lock(&s3fs_crypt_mutex[pos]);
if(0 != (res = pthread_mutex_lock(&s3fs_crypt_mutex[pos]))){
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res);
abort();
}
}else{
pthread_mutex_unlock(&s3fs_crypt_mutex[pos]);
if(0 != (res = pthread_mutex_unlock(&s3fs_crypt_mutex[pos]))){
S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", res);
abort();
}
}
}
}
static unsigned long s3fs_crypt_get_threadid(void)
static unsigned long s3fs_crypt_get_threadid() __attribute__ ((unused));
static unsigned long s3fs_crypt_get_threadid()
{
// On some systems (e.g. FreeBSD), pthread_t is a pointer to a structure,
// so we use a C-style cast (not a C++ cast) instead of an #ifdef.
return (unsigned long)(pthread_self());
}
static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line) __attribute__ ((unused));
static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line)
{
struct CRYPTO_dynlock_value* dyndata;
if(NULL == (dyndata = static_cast<struct CRYPTO_dynlock_value*>(malloc(sizeof(struct CRYPTO_dynlock_value))))){
S3FS_PRN_CRIT("Could not allocate memory for CRYPTO_dynlock_value");
struct CRYPTO_dynlock_value* dyndata = new CRYPTO_dynlock_value();
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
#if S3FS_PTHREAD_ERRORCHECK
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
int res;
if(0 != (res = pthread_mutex_init(&(dyndata->dyn_mutex), &attr))){
S3FS_PRN_CRIT("pthread_mutex_init returned: %d", res);
return NULL;
}
pthread_mutex_init(&(dyndata->dyn_mutex), NULL);
return dyndata;
}
static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line) __attribute__ ((unused));
static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line)
{
if(dyndata){
int res;
if(mode & CRYPTO_LOCK){
pthread_mutex_lock(&(dyndata->dyn_mutex));
if(0 != (res = pthread_mutex_lock(&(dyndata->dyn_mutex)))){
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res);
abort();
}
}else{
pthread_mutex_unlock(&(dyndata->dyn_mutex));
if(0 != (res = pthread_mutex_unlock(&(dyndata->dyn_mutex)))){
S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", res);
abort();
}
}
}
}
static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, const char* file, int line) __attribute__ ((unused));
static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, const char* file, int line)
{
if(dyndata){
pthread_mutex_destroy(&(dyndata->dyn_mutex));
free(dyndata);
int res = pthread_mutex_destroy(&(dyndata->dyn_mutex));
if(res != 0){
S3FS_PRN_CRIT("failed to destroy dyn_mutex");
abort();
}
delete dyndata;
}
}
bool s3fs_init_crypt_mutex(void)
bool s3fs_init_crypt_mutex()
{
if(s3fs_crypt_mutex){
S3FS_PRN_DBG("s3fs_crypt_mutex is not NULL, destroy it.");
@ -140,12 +167,18 @@ bool s3fs_init_crypt_mutex(void)
return false;
}
}
if(NULL == (s3fs_crypt_mutex = static_cast<pthread_mutex_t*>(malloc(CRYPTO_num_locks() * sizeof(pthread_mutex_t))))){
S3FS_PRN_CRIT("Could not allocate memory for s3fs_crypt_mutex");
return false;
}
s3fs_crypt_mutex = new pthread_mutex_t[CRYPTO_num_locks()];
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
#if S3FS_PTHREAD_ERRORCHECK
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
pthread_mutex_init(&s3fs_crypt_mutex[cnt], NULL);
int res = pthread_mutex_init(&s3fs_crypt_mutex[cnt], &attr);
if(res != 0){
S3FS_PRN_CRIT("pthread_mutex_init returned: %d", res);
return false;
}
}
// static lock
CRYPTO_set_locking_callback(s3fs_crypt_mutex_lock);
@ -158,7 +191,7 @@ bool s3fs_init_crypt_mutex(void)
return true;
}
bool s3fs_destroy_crypt_mutex(void)
bool s3fs_destroy_crypt_mutex()
{
if(!s3fs_crypt_mutex){
return true;
@ -171,10 +204,14 @@ bool s3fs_destroy_crypt_mutex(void)
CRYPTO_set_locking_callback(NULL);
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
pthread_mutex_destroy(&s3fs_crypt_mutex[cnt]);
int res = pthread_mutex_destroy(&s3fs_crypt_mutex[cnt]);
if(res != 0){
S3FS_PRN_CRIT("failed to destroy s3fs_crypt_mutex[%d]", cnt);
abort();
}
}
CRYPTO_cleanup_all_ex_data();
free(s3fs_crypt_mutex);
delete[] s3fs_crypt_mutex;
s3fs_crypt_mutex = NULL;
return true;
@ -189,9 +226,7 @@ static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* d
return false;
}
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
if(NULL == ((*digest) = (unsigned char*)malloc(*digestlen))){
return false;
}
*digest = new unsigned char[*digestlen];
if(is_sha256){
HMAC(EVP_sha256(), key, keylen, data, datalen, *digest, digestlen);
}else{
@ -214,7 +249,7 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
//-------------------------------------------------------------------
// Utility Function for MD5
//-------------------------------------------------------------------
size_t get_md5_digest_length(void)
size_t get_md5_digest_length()
{
return MD5_DIGEST_LENGTH;
}
@ -234,17 +269,12 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
size = static_cast<ssize_t>(st.st_size);
}
// seek to top of file.
if(-1 == lseek(fd, start, SEEK_SET)){
return NULL;
}
memset(buf, 0, 512);
MD5_Init(&md5ctx);
for(ssize_t total = 0; total < size; total += bytes){
bytes = 512 < (size - total) ? 512 : (size - total);
bytes = read(fd, buf, bytes);
bytes = pread(fd, buf, bytes, start + total);
if(0 == bytes){
// end of file
break;
@ -257,23 +287,16 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_md5_digest_length()))){
return NULL;
}
result = new unsigned char[get_md5_digest_length()];
MD5_Final(result, &md5ctx);
if(-1 == lseek(fd, start, SEEK_SET)){
free(result);
return NULL;
}
return result;
}
//-------------------------------------------------------------------
// Utility Function for SHA256
//-------------------------------------------------------------------
size_t get_sha256_digest_length(void)
size_t get_sha256_digest_length()
{
return SHA256_DIGEST_LENGTH;
}
@ -281,9 +304,7 @@ size_t get_sha256_digest_length(void)
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
{
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
return false;
}
*digest = new unsigned char[*digestlen];
const EVP_MD* md = EVP_get_digestbyname("sha256");
EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
@ -311,18 +332,13 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
size = static_cast<ssize_t>(st.st_size);
}
// seek to top of file.
if(-1 == lseek(fd, start, SEEK_SET)){
return NULL;
}
sha256ctx = EVP_MD_CTX_create();
EVP_DigestInit_ex(sha256ctx, md, NULL);
memset(buf, 0, 512);
for(ssize_t total = 0; total < size; total += bytes){
bytes = 512 < (size - total) ? 512 : (size - total);
bytes = read(fd, buf, bytes);
bytes = pread(fd, buf, bytes, start + total);
if(0 == bytes){
// end of file
break;
@ -335,17 +351,10 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
EVP_DigestUpdate(sha256ctx, buf, bytes);
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
EVP_MD_CTX_destroy(sha256ctx);
return NULL;
}
result = new unsigned char[get_sha256_digest_length()];
EVP_DigestFinal_ex(sha256ctx, result, NULL);
EVP_MD_CTX_destroy(sha256ctx);
if(-1 == lseek(fd, start, SEEK_SET)){
free(result);
return NULL;
}
return result;
}

src/psemaphore.h (new file, 75 lines)

@ -0,0 +1,75 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_SEMAPHORE_H_
#define S3FS_SEMAPHORE_H_
// portability wrapper for sem_t since macOS does not implement it
#ifdef __APPLE__
#include <dispatch/dispatch.h>
class Semaphore
{
public:
explicit Semaphore(int value) : value(value), sem(dispatch_semaphore_create(value)) {}
~Semaphore() {
// macOS cannot destroy a dispatch semaphore whose count is below its initial value, so post until the count is restored
for(int i = 0; i < get_value(); ++i){
post();
}
dispatch_release(sem);
}
void wait() { dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER); }
void post() { dispatch_semaphore_signal(sem); }
int get_value() const { return value; }
private:
const int value;
dispatch_semaphore_t sem;
};
#else
#include <errno.h>
#include <semaphore.h>
class Semaphore
{
public:
explicit Semaphore(int value) : value(value) { sem_init(&mutex, 0, value); }
~Semaphore() { sem_destroy(&mutex); }
void wait()
{
int r;
do {
r = sem_wait(&mutex);
} while (r == -1 && errno == EINTR);
}
void post() { sem_post(&mutex); }
int get_value() const { return value; }
private:
const int value;
sem_t mutex;
};
#endif
#endif // S3FS_SEMAPHORE_H_
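A minimal usage sketch of this wrapper; the worker/main split is hypothetical and only illustrates the wait/post pairing (and, on the POSIX side, the EINTR-safe wait) that the signal-handling code relies on:

    #include <pthread.h>
    #include "psemaphore.h"

    static Semaphore sem(0);                // initial count of zero

    static void* worker(void*)
    {
        sem.wait();                         // blocks until another thread posts
        // ... perform the queued work here ...
        return NULL;
    }

    int main()
    {
        pthread_t thread;
        pthread_create(&thread, NULL, worker, NULL);
        sem.post();                         // wake the worker exactly once
        pthread_join(thread, NULL);
        return 0;
    }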

File diff suppressed because it is too large.


@ -21,68 +21,65 @@
#define S3FS_S3_H_
#define FUSE_USE_VERSION 26
#define FIVE_GB 5368709120LL
static const int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL;
#include <fuse.h>
#define S3FS_FUSE_EXIT() { \
#define S3FS_FUSE_EXIT() \
do{ \
struct fuse_context* pcxt = fuse_get_context(); \
if(pcxt){ \
fuse_exit(pcxt->fuse); \
} \
}
}while(0)
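Wrapping the macro body in do{ ... }while(0) makes the expansion a single statement, so it composes safely with if/else and a trailing semicolon. A hypothetical illustration (not s3fs code) of the problem the old brace-only form had:

    #define BAD_EXIT() { cleanup(); exit(1); }   // hypothetical brace-only macro

    if(failed)
        BAD_EXIT();   // expands to "{ ... };" -- the stray ';' closes the if,
    else              // so this else no longer matches and compilation fails
        proceed();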
// [NOTE]
// s3fs uses many small allocated chunks in the heap area for the stats
// cache, xml parsing, etc. The OS may decide that giving
// this little memory back to the kernel will cause too much
// overhead and delay the operation.
// With gratitude, this workaround follows a document from
// libxml2. ( http://xmlsoft.org/xmlmem.html )
//
// s3fs uses many small allocated chunks in the heap area
// for the stats cache, xml parsing, etc. The OS may decide
// that giving this little memory back to the kernel
// will cause too much overhead and delay the operation.
// So s3fs calls the malloc_trim function to actually get
// the memory back. The following macros are prepared for
// systems that do not have it.
//
// With gratitude, this workaround follows a document
// from libxml2:
// http://xmlsoft.org/xmlmem.html
// When valgrind is used to check s3fs for memory leaks, a
// large number of chunks may be reported. You can verify
// memory release accurately by building with the
// S3FS_MALLOC_TRIM flag defined. Also, when executing s3fs,
// you can set the MMAP_THRESHOLD environment variable to
// check for memory leaks more accurately. (see man 3 free)
//
#ifdef S3FS_MALLOC_TRIM
#ifdef HAVE_MALLOC_TRIM
#include <malloc.h>
#define S3FS_MALLOCTRIM(pad) malloc_trim(pad)
#else // HAVE_MALLOC_TRIM
#define S3FS_MALLOCTRIM(pad)
#endif // HAVE_MALLOC_TRIM
#else // S3FS_MALLOC_TRIM
#define S3FS_MALLOCTRIM(pad)
#endif // S3FS_MALLOC_TRIM
#define DISPWARN_MALLOCTRIM(str)
#define S3FS_MALLOCTRIM(pad) malloc_trim(pad)
#define S3FS_XMLFREEDOC(doc) \
{ \
do{ \
xmlFreeDoc(doc); \
S3FS_MALLOCTRIM(0); \
}
}while(0)
#define S3FS_XMLFREE(ptr) \
{ \
do{ \
xmlFree(ptr); \
S3FS_MALLOCTRIM(0); \
}
}while(0)
#define S3FS_XMLXPATHFREECONTEXT(ctx) \
{ \
do{ \
xmlXPathFreeContext(ctx); \
S3FS_MALLOCTRIM(0); \
}
}while(0)
#define S3FS_XMLXPATHFREEOBJECT(obj) \
{ \
do{ \
xmlXPathFreeObject(obj); \
S3FS_MALLOCTRIM(0); \
}
#else // HAVE_MALLOC_TRIM
#define DISPWARN_MALLOCTRIM(str) \
fprintf(stderr, "Warning: %s without malloc_trim is possibility of the use memory increase.\n", program_name.c_str())
#define S3FS_MALLOCTRIM(pad)
#define S3FS_XMLFREEDOC(doc) xmlFreeDoc(doc)
#define S3FS_XMLFREE(ptr) xmlFree(ptr)
#define S3FS_XMLXPATHFREECONTEXT(ctx) xmlXPathFreeContext(ctx)
#define S3FS_XMLXPATHFREEOBJECT(obj) xmlXPathFreeObject(obj)
#endif // HAVE_MALLOC_TRIM
}while(0)
#endif // S3FS_S3_H_

File diff suppressed because it is too large.


@ -86,14 +86,19 @@ typedef struct mvnode {
class AutoLock
{
private:
pthread_mutex_t* auto_mutex;
bool is_lock_acquired;
public:
explicit AutoLock(pthread_mutex_t* pmutex, bool no_wait = false);
enum Type {
NO_WAIT = 1,
ALREADY_LOCKED = 2,
NONE = 0
};
explicit AutoLock(pthread_mutex_t* pmutex, Type type = NONE);
bool isLockAcquired() const;
~AutoLock();
private:
pthread_mutex_t* const auto_mutex;
bool is_lock_acquired;
};
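A hedged sketch of how the new Type argument might be used at a call site; the mutex and functions below are hypothetical, and only the class interface above is assumed:

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

    void update_cache()
    {
        AutoLock lock(&cache_lock);                    // NONE: block until acquired
        // ... mutate shared state; released when lock leaves scope ...
    }

    bool try_update_cache()
    {
        AutoLock lock(&cache_lock, AutoLock::NO_WAIT); // trylock semantics
        if(!lock.isLockAcquired()){
            return false;                              // mutex busy; caller retries later
        }
        // ... mutate shared state ...
        return true;
    }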
//-------------------------------------------------------------------
@ -105,23 +110,27 @@ MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, b
MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir = false);
void free_mvnodes(MVNODE *head);
void init_sysconf_vars();
std::string get_username(uid_t uid);
int is_uid_include_group(uid_t uid, gid_t gid);
std::string mydirname(const char* path);
std::string mydirname(std::string path);
std::string mydirname(const std::string& path);
std::string mybasename(const char* path);
std::string mybasename(std::string path);
std::string mybasename(const std::string& path);
int mkdirp(const std::string& path, mode_t mode);
std::string get_exist_directory_path(const std::string& path);
bool check_exist_dir_permission(const char* dirpath);
bool delete_files_in_dir(const char* dir, bool is_remove_own);
bool compare_sysname(const char* target);
time_t get_mtime(const char *s);
time_t get_mtime(headers_t& meta, bool overcheck = true);
time_t get_ctime(headers_t& meta, bool overcheck = true);
off_t get_size(const char *s);
off_t get_size(headers_t& meta);
mode_t get_mode(const char *s);
mode_t get_mode(const char *s, int base = 0);
mode_t get_mode(headers_t& meta, const char* path = NULL, bool checkdir = false, bool forcedir = false);
uid_t get_uid(const char *s);
uid_t get_uid(headers_t& meta);
@ -132,6 +141,7 @@ time_t cvtIAMExpireStringToTime(const char* s);
time_t get_lastmodified(const char* s);
time_t get_lastmodified(headers_t& meta);
bool is_need_check_obj_detail(headers_t& meta);
bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value);
void show_usage(void);
void show_help(void);

src/sighandlers.cpp (new file, 286 lines)

@ -0,0 +1,286 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cerrno>
#include <syslog.h>
#include <pthread.h>
#include <curl/curl.h>
#include <csignal>
#include <algorithm>
#include <map>
#include <string>
#include <list>
#include <vector>
#include "common.h"
#include "sighandlers.h"
#include "curl.h"
#include "fdcache.h"
#include "psemaphore.h"
using namespace std;
//-------------------------------------------------------------------
// Global variables
//-------------------------------------------------------------------
s3fs_log_level debug_level = S3FS_LOG_CRIT;
const char* s3fs_log_nest[S3FS_LOG_NEST_MAX] = {"", "  ", "    ", "      "};
//-------------------------------------------------------------------
// Class S3fsSignals
//-------------------------------------------------------------------
S3fsSignals* S3fsSignals::pSingleton = NULL;
bool S3fsSignals::enableUsr1 = false;
//-------------------------------------------------------------------
// Class methods
//-------------------------------------------------------------------
bool S3fsSignals::Initialize()
{
if(!S3fsSignals::pSingleton){
S3fsSignals::pSingleton = new S3fsSignals;
}
return true;
}
bool S3fsSignals::Destroy()
{
if(S3fsSignals::pSingleton){
delete S3fsSignals::pSingleton;
}
return true;
}
void S3fsSignals::HandlerUSR1(int sig)
{
if(SIGUSR1 != sig){
S3FS_PRN_ERR("The handler for SIGUSR1 received signal(%d)", sig);
return;
}
S3fsSignals* pSigobj = S3fsSignals::get();
if(!pSigobj){
S3FS_PRN_ERR("S3fsSignals object is not initialized.");
return;
}
if(!pSigobj->WakeupUsr1Thread()){
S3FS_PRN_ERR("Failed to wakeup the thread for SIGUSR1.");
return;
}
}
bool S3fsSignals::SetUsr1Handler(const char* path)
{
// set output file
if(!FdManager::SetCacheCheckOutput(path)){
S3FS_PRN_ERR("Could not set output file(%s) for checking cache.", path ? path : "null(stdout)");
return false;
}
S3fsSignals::enableUsr1 = true;
return true;
}
void* S3fsSignals::CheckCacheWorker(void* arg)
{
Semaphore* pSem = static_cast<Semaphore*>(arg);
if(!pSem){
pthread_exit(NULL);
}
if(!S3fsSignals::enableUsr1){
pthread_exit(NULL);
}
// wait and loop
while(S3fsSignals::enableUsr1){
// wait
pSem->wait();
if(!S3fsSignals::enableUsr1){
break; // exit as soon as possible
}
// check all cache
if(!FdManager::get()->CheckAllCache()){
S3FS_PRN_ERR("Processing failed due to some problem.");
}
// do not allow request queuing
for(int value = pSem->get_value(); 0 < value; value = pSem->get_value()){
pSem->wait();
}
}
return NULL;
}
void S3fsSignals::HandlerUSR2(int sig)
{
if(SIGUSR2 == sig){
S3fsSignals::BumpupLogLevel();
}else{
S3FS_PRN_ERR("The handler for SIGUSR2 received signal(%d)", sig);
}
}
bool S3fsSignals::InitUsr2Handler()
{
struct sigaction sa;
memset(&sa, 0, sizeof(struct sigaction));
sa.sa_handler = S3fsSignals::HandlerUSR2;
sa.sa_flags = SA_RESTART;
if(0 != sigaction(SIGUSR2, &sa, NULL)){
return false;
}
return true;
}
s3fs_log_level S3fsSignals::SetLogLevel(s3fs_log_level level)
{
if(level == debug_level){
return debug_level;
}
s3fs_log_level old = debug_level;
debug_level = level;
setlogmask(LOG_UPTO(S3FS_LOG_LEVEL_TO_SYSLOG(debug_level)));
S3FS_PRN_CRIT("change debug level from %sto %s", S3FS_LOG_LEVEL_STRING(old), S3FS_LOG_LEVEL_STRING(debug_level));
return old;
}
s3fs_log_level S3fsSignals::BumpupLogLevel()
{
s3fs_log_level old = debug_level;
debug_level = ( S3FS_LOG_CRIT == debug_level ? S3FS_LOG_ERR :
S3FS_LOG_ERR == debug_level ? S3FS_LOG_WARN :
S3FS_LOG_WARN == debug_level ? S3FS_LOG_INFO :
S3FS_LOG_INFO == debug_level ? S3FS_LOG_DBG :
S3FS_LOG_CRIT );
setlogmask(LOG_UPTO(S3FS_LOG_LEVEL_TO_SYSLOG(debug_level)));
S3FS_PRN_CRIT("change debug level from %sto %s", S3FS_LOG_LEVEL_STRING(old), S3FS_LOG_LEVEL_STRING(debug_level));
return old;
}
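BumpupLogLevel cycles CRIT to ERR to WARN to INFO to DBG and then wraps back to CRIT, so repeatedly signaling a running s3fs walks through every verbosity. A hedged in-process sketch (the wrapper function is hypothetical):

    #include <csignal>

    void bump_twice()
    {
        raise(SIGUSR2);   // handled by HandlerUSR2 -> BumpupLogLevel(): e.g. CRIT -> ERR
        raise(SIGUSR2);   // ERR -> WARN
    }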
//-------------------------------------------------------------------
// Methods
//-------------------------------------------------------------------
S3fsSignals::S3fsSignals() : pThreadUsr1(NULL), pSemUsr1(NULL)
{
if(S3fsSignals::enableUsr1){
if(!InitUsr1Handler()){
S3FS_PRN_ERR("failed creating thread for SIGUSR1 handler, but continue...");
}
}
if(!S3fsSignals::InitUsr2Handler()){
S3FS_PRN_ERR("failed to initialize SIGUSR2 handler for bumping log level, but continue...");
}
}
S3fsSignals::~S3fsSignals()
{
if(S3fsSignals::enableUsr1){
if(!DestroyUsr1Handler()){
S3FS_PRN_ERR("failed stopping thread for SIGUSR1 handler, but continue...");
}
}
}
bool S3fsSignals::InitUsr1Handler()
{
if(pThreadUsr1 || pSemUsr1){
S3FS_PRN_ERR("Already run thread for SIGUSR1");
return false;
}
// create thread
int result;
pSemUsr1 = new Semaphore(0);
pThreadUsr1 = new pthread_t;
if(0 != (result = pthread_create(pThreadUsr1, NULL, S3fsSignals::CheckCacheWorker, static_cast<void*>(pSemUsr1)))){
S3FS_PRN_ERR("Could not create thread for SIGUSR1 by %d", result);
delete pSemUsr1;
delete pThreadUsr1;
pSemUsr1 = NULL;
pThreadUsr1 = NULL;
return false;
}
// set handler
struct sigaction sa;
memset(&sa, 0, sizeof(struct sigaction));
sa.sa_handler = S3fsSignals::HandlerUSR1;
sa.sa_flags = SA_RESTART;
if(0 != sigaction(SIGUSR1, &sa, NULL)){
S3FS_PRN_ERR("Could not set signal handler for SIGUSR1");
DestroyUsr1Handler();
return false;
}
return true;
}
bool S3fsSignals::DestroyUsr1Handler()
{
if(!pThreadUsr1 || !pSemUsr1){
return false;
}
// for thread exit
S3fsSignals::enableUsr1 = false;
// wakeup thread
pSemUsr1->post();
// wait for the thread to exit
void* retval = NULL;
int result;
if(0 != (result = pthread_join(*pThreadUsr1, &retval))){
S3FS_PRN_ERR("Could not stop thread for SIGUSR1 by %d", result);
return false;
}
delete pSemUsr1;
delete pThreadUsr1;
pSemUsr1 = NULL;
pThreadUsr1 = NULL;
return true;
}
bool S3fsSignals::WakeupUsr1Thread()
{
if(!pThreadUsr1 || !pSemUsr1){
S3FS_PRN_ERR("The thread for SIGUSR1 is not setup.");
return false;
}
pSemUsr1->post();
return true;
}
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/

src/sighandlers.h (new file, 73 lines)

@ -0,0 +1,73 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_SIGHANDLERS_H_
#define S3FS_SIGHANDLERS_H_
#include "psemaphore.h"
//----------------------------------------------
// class S3fsSignals
//----------------------------------------------
class S3fsSignals
{
private:
static S3fsSignals* pSingleton;
static bool enableUsr1;
pthread_t* pThreadUsr1;
Semaphore* pSemUsr1;
protected:
static S3fsSignals* get(void) { return pSingleton; }
static void HandlerUSR1(int sig);
static void* CheckCacheWorker(void* arg);
static void HandlerUSR2(int sig);
static bool InitUsr2Handler(void);
S3fsSignals();
~S3fsSignals();
bool InitUsr1Handler(void);
bool DestroyUsr1Handler(void);
bool WakeupUsr1Thread(void);
public:
static bool Initialize(void);
static bool Destroy(void);
static bool SetUsr1Handler(const char* path);
static s3fs_log_level SetLogLevel(s3fs_log_level level);
static s3fs_log_level BumpupLogLevel(void);
};
#endif // S3FS_SIGHANDLERS_H_
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
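A sketch of the expected lifecycle, inferred from this declaration; the output path is hypothetical, and SetUsr1Handler presumably runs before Initialize so the constructor sees enableUsr1:

    int main(int argc, char* argv[])
    {
        S3fsSignals::SetUsr1Handler("/tmp/s3fs_cache_check.log"); // enables SIGUSR1 support
        S3fsSignals::Initialize();   // installs SIGUSR1/SIGUSR2 handlers, starts worker
        // ... run the fuse main loop ...
        S3fsSignals::Destroy();      // joins the SIGUSR1 worker thread
        return 0;
    }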


@ -17,12 +17,15 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cerrno>
#include <climits>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <syslog.h>
#include <ctime>
#include <stdexcept>
#include <sstream>
#include <string>
#include <map>
@ -32,47 +35,70 @@
using namespace std;
template <class T> std::string str(T value) {
std::ostringstream s;
s << value;
return s.str();
}
template std::string str(short value);
template std::string str(unsigned short value);
template std::string str(int value);
template std::string str(unsigned int value);
template std::string str(long value);
template std::string str(unsigned long value);
template std::string str(long long value);
template std::string str(unsigned long long value);
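Since the definition of str() moved into the .cpp file, only the types explicitly instantiated above can be linked against. A hedged usage sketch:

    #include "string_util.h"        // declares: template <class T> std::string str(T value);

    std::string a = str(42);        // int: covered by the explicit instantiations above
    std::string b = str(1234567ULL);// unsigned long long: also instantiated
    // str(3.14) would fail to link: double is not in the instantiation list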
static const char hexAlphabet[] = "0123456789ABCDEF";
off_t s3fs_strtoofft(const char* str, bool is_base_16)
// replacement for C++11 std::stoll
off_t s3fs_strtoofft(const char* str, int base)
{
if(!str || '\0' == *str){
return 0;
errno = 0;
char *temp;
long long result = strtoll(str, &temp, base);
if(temp == str || *temp != '\0'){
throw std::invalid_argument("s3fs_strtoofft");
}
off_t result;
bool chk_space;
bool chk_base16_prefix;
for(result = 0, chk_space = false, chk_base16_prefix = false; '\0' != *str; str++){
// check head space
if(!chk_space && isspace(*str)){
continue;
}else if(!chk_space){
chk_space = true;
}
// check prefix for base 16
if(!chk_base16_prefix){
chk_base16_prefix = true;
if('0' == *str && ('x' == str[1] || 'X' == str[1])){
is_base_16 = true;
str++;
continue;
}
}
// check like isalnum and set data
result *= (is_base_16 ? 16 : 10);
if('0' <= *str || '9' < *str){
result += static_cast<off_t>(*str - '0');
}else if(is_base_16){
if('A' <= *str && *str <= 'F'){
result += static_cast<off_t>(*str - 'A' + 0x0a);
}else if('a' <= *str && *str <= 'f'){
result += static_cast<off_t>(*str - 'a' + 0x0a);
}else{
return 0;
}
}else{
return 0;
if((result == LLONG_MIN || result == LLONG_MAX) && errno == ERANGE){
throw std::out_of_range("s3fs_strtoofft");
}
return result;
}
// wrapped s3fs_strtoofft()
//
// This function catches any s3fs_strtoofft() exception and returns a boolean instead.
//
bool try_strtoofft(const char* str, off_t& value, int base)
{
if(str){
try{
value = s3fs_strtoofft(str, base);
}catch(std::exception &e){
S3FS_PRN_WARN("something error is occurred in convert string(%s) to off_t.", str);
return false;
}
}else{
S3FS_PRN_WARN("parameter string is null.");
return false;
}
return true;
}
// wrapped try_strtoofft -> s3fs_strtoofft()
//
// This function returns 0 if a value that cannot be converted is specified.
// Call this only where a return value of 0 can be treated as an error and the operation can continue.
//
off_t cvt_strtoofft(const char* str, int base)
{
off_t result = 0;
if(!try_strtoofft(str, result, base)){
S3FS_PRN_WARN("something error is occurred in convert string(%s) to off_t, thus return 0 as default.", (str ? str : "null"));
return 0;
}
return result;
}
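Taken together, the three tiers offer throwing, boolean, and defaulting conversions; a hedged usage sketch of the functions declared above:

    off_t size = 0;
    try{
        size = s3fs_strtoofft("0x1F", /*base=*/ 16);  // throws on bad input
    }catch(const std::exception& e){
        // handle std::invalid_argument / std::out_of_range
    }

    if(try_strtoofft("12345", size)){                 // logs and returns false on bad input
        // size is valid here
    }

    size = cvt_strtoofft("garbage");                  // logs a warning and yields 0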
@ -80,7 +106,7 @@ off_t s3fs_strtoofft(const char* str, bool is_base_16)
string lower(string s)
{
// change each character of the string to lower case
for(unsigned int i = 0; i < s.length(); i++){
for(size_t i = 0; i < s.length(); i++){
s[i] = tolower(s[i]);
}
return s;
@ -105,8 +131,7 @@ string trim_right(const string &s, const string &t /* = SPACES */)
string trim(const string &s, const string &t /* = SPACES */)
{
string d(s);
return trim_left(trim_right(d, t), t);
return trim_left(trim_right(s, t), t);
}
/**
@ -117,7 +142,7 @@ string trim(const string &s, const string &t /* = SPACES */)
string urlEncode(const string &s)
{
string result;
for (unsigned i = 0; i < s.length(); ++i) {
for (size_t i = 0; i < s.length(); ++i) {
char c = s[i];
if (c == '/' // Note- special case for fuse paths...
|| c == '.'
@ -145,7 +170,7 @@ string urlEncode(const string &s)
string urlEncode2(const string &s)
{
string result;
for (unsigned i = 0; i < s.length(); ++i) {
for (size_t i = 0; i < s.length(); ++i) {
char c = s[i];
if (c == '=' // Note- special case for fuse paths...
|| c == '&' // Note- special case for s3...
@ -170,11 +195,11 @@ string urlEncode2(const string &s)
string urlDecode(const string& s)
{
string result;
for(unsigned i = 0; i < s.length(); ++i){
for(size_t i = 0; i < s.length(); ++i){
if(s[i] != '%'){
result += s[i];
}else{
char ch = 0;
int ch = 0;
if(s.length() <= ++i){
break; // wrong format.
}
@ -184,7 +209,7 @@ string urlDecode(const string& s)
}
ch *= 16;
ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00;
result += ch;
result += static_cast<char>(ch);
}
}
return result;
@ -195,15 +220,15 @@ bool takeout_str_dquart(string& str)
size_t pos;
// '"' for start
if(string::npos != (pos = str.find_first_of("\""))){
if(string::npos != (pos = str.find_first_of('\"'))){
str = str.substr(pos + 1);
// '"' for end
if(string::npos == (pos = str.find_last_of("\""))){
if(string::npos == (pos = str.find_last_of('\"'))){
return false;
}
str = str.substr(0, pos);
if(string::npos != str.find_first_of("\"")){
if(string::npos != str.find_first_of('\"')){
return false;
}
}
@ -244,7 +269,8 @@ string get_date_rfc850()
{
char buf[100];
time_t t = time(NULL);
strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S GMT", gmtime(&t));
struct tm res;
strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S GMT", gmtime_r(&t, &res));
return buf;
}
@ -258,17 +284,88 @@ void get_date_sigv3(string& date, string& date8601)
string get_date_string(time_t tm)
{
char buf[100];
strftime(buf, sizeof(buf), "%Y%m%d", gmtime(&tm));
struct tm res;
strftime(buf, sizeof(buf), "%Y%m%d", gmtime_r(&tm, &res));
return buf;
}
string get_date_iso8601(time_t tm)
{
char buf[100];
strftime(buf, sizeof(buf), "%Y%m%dT%H%M%SZ", gmtime(&tm));
struct tm res;
strftime(buf, sizeof(buf), "%Y%m%dT%H%M%SZ", gmtime_r(&tm, &res));
return buf;
}
bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime)
{
if(!pdate){
return false;
}
struct tm tm;
char* prest = strptime(pdate, "%Y-%m-%dT%T", &tm);
if(!prest || prest == pdate){
// wrong format (strptime returns NULL or consumes nothing)
return false;
}
unixtime = mktime(&tm);
return true;
}
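A brief sketch of the parser above; note that mktime interprets the struct tm in local time:

    time_t t = 0;
    if(get_unixtime_from_iso8601("2020-08-10T11:52:14", t)){
        // t now holds the epoch seconds for that local-time timestamp
    }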
//
// Convert to unixtime from a string formatted as follows:
// "12Y12M12D12h12m12s", "86400s", "9h30m", etc.
//
bool convert_unixtime_from_option_arg(const char* argv, time_t& unixtime)
{
if(!argv){
return false;
}
unixtime = 0;
const char* ptmp;
int last_unit_type = 0; // unit flag.
bool is_last_number;
time_t tmptime;
for(ptmp = argv, is_last_number = true, tmptime = 0; ptmp && *ptmp; ++ptmp){
if('0' <= *ptmp && *ptmp <= '9'){
tmptime *= 10;
tmptime += static_cast<time_t>(*ptmp - '0');
is_last_number = true;
}else if(is_last_number){
if('Y' == *ptmp && 1 > last_unit_type){
unixtime += (tmptime * (60 * 60 * 24 * 365)); // average 365 day / year
last_unit_type = 1;
}else if('M' == *ptmp && 2 > last_unit_type){
unixtime += (tmptime * (60 * 60 * 24 * 30)); // average 30 day / month
last_unit_type = 2;
}else if('D' == *ptmp && 3 > last_unit_type){
unixtime += (tmptime * (60 * 60 * 24));
last_unit_type = 3;
}else if('h' == *ptmp && 4 > last_unit_type){
unixtime += (tmptime * (60 * 60));
last_unit_type = 4;
}else if('m' == *ptmp && 5 > last_unit_type){
unixtime += (tmptime * 60);
last_unit_type = 5;
}else if('s' == *ptmp && 6 > last_unit_type){
unixtime += tmptime;
last_unit_type = 6;
}else{
return false;
}
tmptime = 0;
is_last_number = false;
}else{
return false;
}
}
if(is_last_number){
return false;
}
return true;
}
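Units must appear in descending order and every digit run needs a trailing unit; a hedged usage sketch:

    time_t expire = 0;
    if(convert_unixtime_from_option_arg("1D12h", expire)){
        // expire == 1*86400 + 12*3600 == 129600 seconds
    }
    // "12h1D" (wrong order) or a trailing bare number such as "90" returns false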
std::string s3fs_hex(const unsigned char* input, size_t length)
{
std::string hex;
@ -285,12 +382,10 @@ char* s3fs_base64(const unsigned char* input, size_t length)
static const char* base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
char* result;
if(!input || 0 >= length){
if(!input || 0 == length){
return NULL;
}
if(NULL == (result = (char*)malloc((((length / 3) + 1) * 4 + 1) * sizeof(char)))){
return NULL; // ENOMEM
}
result = new char[((length / 3) + 1) * 4 + 1];
unsigned char parts[4];
size_t rpos;
@ -338,9 +433,7 @@ unsigned char* s3fs_decode64(const char* input, size_t* plength)
if(!input || 0 == strlen(input) || !plength){
return NULL;
}
if(NULL == (result = (unsigned char*)malloc((strlen(input) + 1)))){
return NULL; // ENOMEM
}
result = new unsigned char[strlen(input) + 1];
unsigned char parts[4];
size_t input_len = strlen(input);
@ -367,6 +460,135 @@ unsigned char* s3fs_decode64(const char* input, size_t* plength)
return result;
}
/*
* Detect and rewrite invalid utf8. We take invalid bytes
* and encode them into a private region of the unicode
* space. This is sometimes known as wtf8, the wobbly transformation format.
* It is necessary because S3 validates the utf8 used for identifiers for
* correctness, while some clients may provide invalid utf8, notably
* Windows using cp1252.
*/
// Base location for the transform. The range 0xE000 - 0xF8FF
// is a private use range, so we use the start of this range.
static unsigned int escape_base = 0xe000;
// encode bytes into wobbly utf8.
// 'result' can be null. returns true if transform was needed.
bool s3fs_wtf8_encode(const char *s, string *result)
{
bool invalid = false;
// Pass valid utf8 code through
for (; *s; s++) {
const unsigned char c = *s;
// single byte encoding
if (c <= 0x7f) {
if (result) {
*result += c;
}
continue;
}
// otherwise, it must be one of the valid start bytes
if ( c >= 0xc2 && c <= 0xf5 ) {
// two byte encoding
// don't need bounds check, string is zero terminated
if ((c & 0xe0) == 0xc0 && (s[1] & 0xc0) == 0x80) {
// all two byte encodings starting higher than c1 are valid
if (result) {
*result += c;
*result += *(++s);
}
continue;
}
// three byte encoding
if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) {
const unsigned code = ((c & 0x0f) << 12) | ((s[1] & 0x3f) << 6) | (s[2] & 0x3f);
if (code >= 0x800 && ! (code >= 0xd800 && code <= 0xdfff)) {
// not overlong and not a utf-16 surrogate code point (U+D800 - U+DFFF)
if (result) {
*result += c;
*result += *(++s);
*result += *(++s);
}
continue;
}
}
// four byte encoding
if ((c & 0xf8) == 0xf0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80 && (s[3] & 0xc0) == 0x80) {
const unsigned code = ((c & 0x07) << 18) | ((s[1] & 0x3f) << 12) | ((s[2] & 0x3f) << 6) | (s[3] & 0x3f);
if (code >= 0x10000 && code <= 0x10ffff) {
// not overlong and in defined unicode space
if (result) {
*result += c;
*result += *(++s);
*result += *(++s);
*result += *(++s);
}
continue;
}
}
}
// printf("invalid %02x at %d\n", c, i);
// Invalid utf8 byte. Convert it into the private use area of unicode
// (the 0xE000 - 0xF8FF range), which yields a three byte encoding.
invalid = true;
if (result) {
unsigned escape = escape_base + c;
*result += static_cast<char>(0xe0 | ((escape >> 12) & 0x0f));
*result += static_cast<char>(0x80 | ((escape >> 06) & 0x3f));
*result += static_cast<char>(0x80 | ((escape >> 00) & 0x3f));
}
}
return invalid;
}
string s3fs_wtf8_encode(const string &s)
{
string result;
s3fs_wtf8_encode(s.c_str(), &result);
return result;
}
// The reverse operation, turn encoded bytes back into their original values
// The code assumes that we map to a three-byte code point.
bool s3fs_wtf8_decode(const char *s, string *result)
{
bool encoded = false;
for (; *s; s++) {
unsigned char c = *s;
// look for a three byte tuple matching our encoding code
if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) {
unsigned code = (c & 0x0f) << 12;
code |= (s[1] & 0x3f) << 6;
code |= (s[2] & 0x3f) << 0;
if (code >= escape_base && code <= escape_base + 0xff) {
// convert back
encoded = true;
if(result){
*result += static_cast<char>(code - escape_base);
}
s+=2;
continue;
}
}
if (result) {
*result += c;
}
}
return encoded;
}
string s3fs_wtf8_decode(const string &s)
{
string result;
s3fs_wtf8_decode(s.c_str(), &result);
return result;
}
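A round-trip sketch (the literal is hypothetical): a cp1252 byte such as 0xE9 is invalid utf-8, so it is escaped into the private use area on encode and restored on decode:

    #include <cassert>
    #include <string>
    #include "string_util.h"

    std::string raw("caf\xe9");                  // 0xE9: cp1252 e-acute, invalid utf-8
    std::string wire = s3fs_wtf8_encode(raw);    // valid utf-8; 0xE9 -> U+E0E9 (3 bytes)
    assert(s3fs_wtf8_decode(wire) == raw);       // original byte sequence restored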
/*
* Local variables:
* tab-width: 4


@ -28,18 +28,17 @@
#include <sys/types.h>
#include <string>
#include <sstream>
#define SPACES " \t\r\n"
#define STR2NCMP(str1, str2) strncmp(str1, str2, strlen(str2))
static const std::string SPACES = " \t\r\n";
template<typename T> std::string str(T value) {
std::stringstream s;
s << value;
return s.str();
}
static inline int STR2NCMP(const char *str1, const char *str2) { return strncmp(str1, str2, strlen(str2)); }
off_t s3fs_strtoofft(const char* str, bool is_base_16 = false);
template <class T> std::string str(T value);
// Convert string to off_t. Throws std::invalid_argument and std::out_of_range on bad input.
off_t s3fs_strtoofft(const char* str, int base = 0);
bool try_strtoofft(const char* str, off_t& value, int base = 0);
off_t cvt_strtoofft(const char* str, int base = 0);
std::string trim_left(const std::string &s, const std::string &t = SPACES);
std::string trim_right(const std::string &s, const std::string &t = SPACES);
@ -49,6 +48,8 @@ std::string get_date_rfc850(void);
void get_date_sigv3(std::string& date, std::string& date8601);
std::string get_date_string(time_t tm);
std::string get_date_iso8601(time_t tm);
bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime);
bool convert_unixtime_from_option_arg(const char* argv, time_t& unixtime);
std::string urlEncode(const std::string &s);
std::string urlEncode2(const std::string &s);
std::string urlDecode(const std::string& s);
@ -59,6 +60,11 @@ std::string s3fs_hex(const unsigned char* input, size_t length);
char* s3fs_base64(const unsigned char* input, size_t length);
unsigned char* s3fs_decode64(const char* input, size_t* plength);
bool s3fs_wtf8_encode(const char *s, std::string *result);
std::string s3fs_wtf8_encode(const std::string &s);
bool s3fs_wtf8_decode(const char *s, std::string *result);
std::string s3fs_wtf8_decode(const std::string &s);
#endif // S3FS_STRING_UTIL_H_
/*


@ -20,11 +20,21 @@
#include <limits>
#include <stdint.h>
#include <strings.h>
#include <string>
#include <map>
#include "common.h"
#include "string_util.h"
#include "test_util.h"
//-------------------------------------------------------------------
// Global variables for test_string_util
//-------------------------------------------------------------------
bool foreground = false;
s3fs_log_level debug_level = S3FS_LOG_CRIT;
std::string instance_name;
void test_trim()
{
ASSERT_EQUALS(std::string("1234"), trim(" 1234 "));
@ -75,9 +85,52 @@ void test_base64()
// TODO: invalid input
}
void test_strtoofft()
{
ASSERT_EQUALS(s3fs_strtoofft("0"), static_cast<off_t>(0L));
ASSERT_EQUALS(s3fs_strtoofft("9"), static_cast<off_t>(9L));
try{
s3fs_strtoofft("A");
abort();
}catch(std::exception &e){
// expected
}
ASSERT_EQUALS(s3fs_strtoofft("A", /*base=*/ 16), static_cast<off_t>(10L));
ASSERT_EQUALS(s3fs_strtoofft("F", /*base=*/ 16), static_cast<off_t>(15L));
ASSERT_EQUALS(s3fs_strtoofft("a", /*base=*/ 16), static_cast<off_t>(10L));
ASSERT_EQUALS(s3fs_strtoofft("f", /*base=*/ 16), static_cast<off_t>(15L));
ASSERT_EQUALS(s3fs_strtoofft("deadbeef", /*base=*/ 16), static_cast<off_t>(3735928559L));
}
void test_wtf8_encoding()
{
std::string ascii("normal string");
std::string utf8("Hyld\xc3\xbdpi \xc3\xbej\xc3\xb3\xc3\xb0""f\xc3\xa9lagsins vex \xc3\xbar k\xc3\xa6rkomnu b\xc3\xb6li \xc3\xad \xc3\xa1st");
std::string cp1252("Hyld\xfdpi \xfej\xf3\xf0""f\xe9lagsins vex \xfar k\xe6rkomnu b\xf6li \xed \xe1st");
std::string broken = utf8;
broken[14] = 0x97;
std::string mixed = ascii + utf8 + cp1252;
ASSERT_EQUALS(s3fs_wtf8_encode(ascii), ascii);
ASSERT_EQUALS(s3fs_wtf8_decode(ascii), ascii);
ASSERT_EQUALS(s3fs_wtf8_encode(utf8), utf8);
ASSERT_EQUALS(s3fs_wtf8_decode(utf8), utf8);
ASSERT_NEQUALS(s3fs_wtf8_encode(cp1252), cp1252);
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(cp1252)), cp1252);
ASSERT_NEQUALS(s3fs_wtf8_encode(broken), broken);
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(broken)), broken);
ASSERT_NEQUALS(s3fs_wtf8_encode(mixed), mixed);
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(mixed)), mixed);
}
int main(int argc, char *argv[])
{
test_trim();
test_base64();
test_strtoofft();
test_wtf8_encoding();
return 0;
}


@ -20,11 +20,50 @@
#include <cstdlib>
#include <iostream>
#include <stdio.h>
template <typename T> void assert_equals(const T &x, const T &y, const char *file, int line)
{
if (x != y) {
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
std::cerr << std::endl;
std::exit(1);
}
}
template <> void assert_equals(const std::string &x, const std::string &y, const char *file, int line)
{
if (x != y) {
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
for (unsigned i=0; i<x.length(); i++)
fprintf(stderr, "%02x ", (unsigned char)x[i]);
std::cerr << std::endl;
for (unsigned i=0; i<y.length(); i++)
fprintf(stderr, "%02x ", (unsigned char)y[i]);
std::cerr << std::endl;
std::exit(1);
}
}
template <typename T> void assert_nequals(const T &x, const T &y, const char *file, int line)
{
if (x == y) {
std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
std::exit(1);
}
}
template <> void assert_nequals(const std::string &x, const std::string &y, const char *file, int line)
{
if (x == y) {
std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
for (unsigned i=0; i<x.length(); i++)
fprintf(stderr, "%02x ", (unsigned char)x[i]);
std::cerr << std::endl;
for (unsigned i=0; i<y.length(); i++)
fprintf(stderr, "%02x ", (unsigned char)y[i]);
std::cerr << std::endl;
std::exit(1);
}
}
@ -34,8 +73,8 @@ void assert_strequals(const char *x, const char *y, const char *file, int line)
if(x == NULL && y == NULL){
return;
// cppcheck-suppress nullPointerRedundantCheck
} else if((x == NULL || y == NULL) || strcmp(x, y) != 0){
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
} else if(x == NULL || y == NULL || strcmp(x, y) != 0){
std::cerr << (x ? x : "null") << " != " << (y ? y : "null") << " at " << file << ":" << line << std::endl;
std::exit(1);
}
}
@ -43,5 +82,8 @@ void assert_strequals(const char *x, const char *y, const char *file, int line)
#define ASSERT_EQUALS(x, y) \
assert_equals((x), (y), __FILE__, __LINE__)
#define ASSERT_NEQUALS(x, y) \
assert_nequals((x), (y), __FILE__, __LINE__)
#define ASSERT_STREQUALS(x, y) \
assert_strequals((x), (y), __FILE__, __LINE__)

test/filter-suite-log.sh (new executable file, 141 lines)

@ -0,0 +1,141 @@
#!/bin/bash
#
# s3fs - FUSE-based file system backed by Amazon S3
#
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
func_usage()
{
echo ""
echo "Usage: $1 [-h] <log file path>"
echo " -h print help"
echo " log file path path for test-suite.log"
echo ""
}
PRGNAME=`basename $0`
SCRIPTDIR=`dirname $0`
S3FSDIR=`cd ${SCRIPTDIR}/..; pwd`
TOPDIR=`cd ${S3FSDIR}/test; pwd`
SUITELOG="${TOPDIR}/test-suite.log"
TMP_LINENO_FILE="/tmp/.lineno.tmp"
while [ $# -ne 0 ]; do
if [ "X$1" = "X" ]; then
break
elif [ "X$1" = "X-h" -o "X$1" = "X-H" -o "X$1" = "X--help" -o "X$1" = "X--HELP" ]; then
func_usage ${PRGNAME}
exit 0
else
SUITELOG=$1
fi
shift
done
if [ ! -f ${SUITELOG} ]; then
echo "[ERROR] not found ${SUITELOG} log file."
exit 1
fi
#
# Extract keyword line numbers and types
#
# 0 : normal line
# 1 : start line for one small test(specified in integration-test-main.sh)
# 2 : passed line of end of one small test(specified in test-utils.sh)
# 3 : failed line of end of one small test(specified in test-utils.sh)
#
grep -n -e 'test_.*: ".*"' -o -e 'test_.* passed' -o -e 'test_.* failed' ${SUITELOG} 2>/dev/null | sed 's/:test_.*: ".*"/ 1/g' | sed 's/:test_.* passed/ 2/g' | sed 's/:test_.* failed/ 3/g' > ${TMP_LINENO_FILE}
#
# Loop for printing result
#
prev_line_type=0
prev_line_number=1
while read line; do
# line is "<line number> <line type>"
number_type=($line)
head_line_cnt=`expr ${number_type[0]} - 1`
tail_line_cnt=`expr ${number_type[0]} - ${prev_line_number}`
if [ ${number_type[1]} -eq 2 ]; then
echo ""
fi
if [ ${prev_line_type} -eq 1 ]; then
if [ ${number_type[1]} -eq 2 ]; then
# if passed, cut s3fs information messages
head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
elif [ ${number_type[1]} -eq 3 ]; then
# if failed, print all
head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%'
else
# there is start keyword but not end keyword, so print all
head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%'
fi
elif [ ${prev_line_type} -eq 2 -o ${prev_line_type} -eq 3 ]; then
if [ ${number_type[1]} -eq 2 -o ${number_type[1]} -eq 3 ]; then
# previous was the end of a test, but this line is also an end without a start keyword, so print all
head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%'
else
# this area is not from start to end, cut s3fs information messages
head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
fi
else
if [ ${number_type[1]} -eq 2 -o ${number_type[1]} -eq 3 ]; then
# previous was a normal line, but this line is the end of a test without a start keyword, so print all
head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%'
else
# this area is normal, cut s3fs information messages
head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
fi
fi
if [ ${number_type[1]} -eq 3 ]; then
echo ""
fi
prev_line_type=${number_type[1]}
prev_line_number=${number_type[0]}
done < ${TMP_LINENO_FILE}
#
# Print rest lines
#
file_line_cnt=`wc -l ${SUITELOG} | awk '{print $1}'`
tail_line_cnt=`expr ${file_line_cnt} - ${prev_line_number}`
if [ ${prev_line_type} -eq 1 ]; then
tail -${tail_line_cnt} ${SUITELOG} | grep -v -e '[0-9]\+\%'
else
tail -${tail_line_cnt} ${SUITELOG} | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
fi
#
# Remove temp file
#
rm -f ${TMP_LINENO_FILE}
exit 0
#
# Local variables:
# tab-width: 4
# c-basic-offset: 4
# End:
# vim600: noet sw=4 ts=4 fdm=marker
# vim<600: noet sw=4 ts=4
#


@ -8,13 +8,14 @@
# environment variables:
#
# S3FS_CREDENTIALS_FILE=keyfile s3fs format key file
# S3FS_PROFILE=name s3fs profile to use (overrides key file)
# TEST_BUCKET_1=bucketname Name of bucket to use
# S3PROXY_BINARY="" Specify empty string to skip S3Proxy start
# S3_URL="http://s3.amazonaws.com" Specify Amazon AWS as the S3 provider
# S3_URL="https://s3.amazonaws.com" Specify Amazon AWS as the S3 provider
#
# Example of running against Amazon S3 using a bucket named "bucket":
#
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="http://s3.amazonaws.com" ./small-integration-test.sh
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="https://s3.amazonaws.com" ./small-integration-test.sh
#
# To change the s3fs-fuse debug level:
#
@ -27,7 +28,7 @@
#
# Run all of the tests from the makefile
#
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="http://s3.amazonaws.com" make check
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="https://s3.amazonaws.com" make check
#
# Run the tests with request auth turned off in both S3Proxy and s3fs-fuse. This can be
# useful for poking around with plain old curl
@ -38,10 +39,12 @@
# eg: VALGRIND="--tool=memcheck --leak-check=full" ./small-integration-test.sh
set -o errexit
set -o pipefail
S3FS=../src/s3fs
# Allow these defaulted values to be overridden
: ${S3_URL:="http://127.0.0.1:8080"}
: ${S3_URL:="https://127.0.0.1:8080"}
: ${S3FS_CREDENTIALS_FILE:="passwd-s3fs"}
: ${TEST_BUCKET_1:="s3fs-integration-test"}
@ -50,7 +53,7 @@ export S3_URL
export TEST_SCRIPT_DIR=`pwd`
export TEST_BUCKET_MOUNT_POINT_1=${TEST_BUCKET_1}
S3PROXY_VERSION="1.5.2"
S3PROXY_VERSION="1.7.1"
S3PROXY_BINARY=${S3PROXY_BINARY-"s3proxy-${S3PROXY_VERSION}"}
if [ ! -f "$S3FS_CREDENTIALS_FILE" ]
@ -60,6 +63,11 @@ then
fi
chmod 600 "$S3FS_CREDENTIALS_FILE"
if [ -z "${S3FS_PROFILE}" ]; then
export AWS_ACCESS_KEY_ID=$(cut -d: -f1 ${S3FS_CREDENTIALS_FILE})
export AWS_SECRET_ACCESS_KEY=$(cut -d: -f2 ${S3FS_CREDENTIALS_FILE})
fi
if [ ! -d $TEST_BUCKET_MOUNT_POINT_1 ]
then
mkdir -p $TEST_BUCKET_MOUNT_POINT_1
@ -72,18 +80,18 @@ function retry {
N=$1; shift;
status=0
for i in $(seq $N); do
echo "Trying: $@"
$@
echo "Trying: $*"
"$@"
status=$?
if [ $status == 0 ]; then
break
fi
sleep 1
echo "Retrying: $@"
echo "Retrying: $*"
done
if [ $status != 0 ]; then
echo "timeout waiting for $@"
echo "timeout waiting for $*"
fi
set -o errexit
return $status
@ -108,7 +116,8 @@ function start_s3proxy {
chmod +x "${S3PROXY_BINARY}"
fi
stdbuf -oL -eL java -jar "$S3PROXY_BINARY" --properties $S3PROXY_CONFIG | stdbuf -oL -eL sed -u "s/^/s3proxy: /" &
stdbuf -oL -eL java -jar "$S3PROXY_BINARY" --properties $S3PROXY_CONFIG &
S3PROXY_PID=$!
# wait for S3Proxy to start
for i in $(seq 30);
@ -121,8 +130,6 @@ function start_s3proxy {
fi
sleep 1
done
S3PROXY_PID=$(netstat -lpnt | grep :8080 | awk '{ print $7 }' | sed -u 's|/java||')
fi
}
@ -130,17 +137,17 @@ function stop_s3proxy {
if [ -n "${S3PROXY_PID}" ]
then
kill $S3PROXY_PID
wait $S3PROXY_PID
fi
}
# Mount the bucket, function arguments passed to s3fs in addition to
# a set of common arguments.
function start_s3fs {
# Public bucket if PUBLIC is set
if [ -n "${PUBLIC}" ]; then
AUTH_OPT="-o public_bucket=1"
elif [ -n "${S3FS_PROFILE}" ]; then
AUTH_OPT="-o profile=${S3FS_PROFILE}"
else
AUTH_OPT="-o passwd_file=${S3FS_CREDENTIALS_FILE}"
fi
@ -148,14 +155,21 @@ function start_s3fs {
# If VALGRIND is set, pass it as options to valgrind.
# start valgrind-listener in another shell.
# eg: VALGRIND="--tool=memcheck --leak-check=full" ./small-integration-test.sh
# Start valgind-listener (default port is 1500)
# Start valgrind-listener (default port is 1500)
if [ -n "${VALGRIND}" ]; then
VALGRIND_EXEC="valgrind ${VALGRIND} --log-socket=127.0.1.1"
fi
# On OSX only, we need to specify the direct_io and auto_cache flag.
if [ `uname` = "Darwin" ]; then
DIRECT_IO_OPT="-o direct_io -o auto_cache"
else
DIRECT_IO_OPT=""
fi
# Common s3fs options:
#
# TODO: Allow all these options to be overriden with env variables
# TODO: Allow all these options to be overridden with env variables
#
# use_path_request_style
# The test env doesn't have virtual hosts
@ -181,15 +195,39 @@ function start_s3fs {
$TEST_BUCKET_MOUNT_POINT_1 \
-o use_path_request_style \
-o url=${S3_URL} \
-o no_check_certificate \
-o ssl_verify_hostname=0 \
-o use_xattr=1 \
-o createbucket \
${AUTH_OPT} \
${DIRECT_IO_OPT} \
-o stat_cache_expire=1 \
-o stat_cache_interval_expire=1 \
-o dbglevel=${DBGLEVEL:=info} \
-o retries=3 \
-f \
${@} \
|& stdbuf -oL -eL sed -u "s/^/s3fs: /" &
"${@}" | stdbuf -oL -eL sed $SED_BUFFER_FLAG "s/^/s3fs: /" &
S3FS_PID=$!
)
retry 5 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts || exit 1
if [ `uname` = "Darwin" ]; then
set +o errexit
TRYCOUNT=0
while [ $TRYCOUNT -le 20 ]; do
df | grep -q $TEST_BUCKET_MOUNT_POINT_1
if [ $? -eq 0 ]; then
break;
fi
sleep 1
TRYCOUNT=`expr ${TRYCOUNT} + 1`
done
if [ $? -ne 0 ]; then
exit 1
fi
set -o errexit
else
retry 20 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts || exit 1
fi
# Quick way to start system up for manual testing with options under test
if [[ -n ${INTERACT} ]]; then
@ -202,14 +240,20 @@ function start_s3fs {
function stop_s3fs {
# Retry in case file system is in use
if grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts; then
retry 10 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts && fusermount -u $TEST_BUCKET_MOUNT_POINT_1
if [ `uname` = "Darwin" ]; then
if df | grep -q $TEST_BUCKET_MOUNT_POINT_1; then
retry 10 df | grep -q $TEST_BUCKET_MOUNT_POINT_1 && umount $TEST_BUCKET_MOUNT_POINT_1
fi
else
if grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts; then
retry 10 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts && fusermount -u $TEST_BUCKET_MOUNT_POINT_1
fi
fi
}
# trap handlers do not stack. If a test sets its own, the new handler should call common_exit_handler
function common_exit_handler {
stop_s3proxy
stop_s3fs
stop_s3proxy
}
trap common_exit_handler EXIT
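# A minimal sketch (not part of this file) of how a test script would layer
# its own cleanup on top of the shared teardown, since "trap ... EXIT"
# replaces any previous handler rather than stacking:
#
#   function my_exit_handler {
#       rm -f "${MY_SCRATCH_FILE}"   # hypothetical test-specific cleanup
#       common_exit_handler          # then run the shared teardown
#   }
#   trap my_exit_handler EXIT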


@ -1,25 +1,21 @@
#!/bin/bash
set -o errexit
set -o pipefail
source test-utils.sh
function test_append_file {
describe "Testing append to file ..."
TEST_INPUT="echo ${TEST_TEXT} to ${TEST_TEXT_FILE}"
# Write a small test file
for x in `seq 1 $TEST_TEXT_FILE_LENGTH`
do
echo "echo ${TEST_TEXT} to ${TEST_TEXT_FILE}"
echo $TEST_INPUT
done > ${TEST_TEXT_FILE}
# Verify contents of file
echo "Verifying length of test file"
FILE_LENGTH=`wc -l $TEST_TEXT_FILE | awk '{print $1}'`
if [ "$FILE_LENGTH" -ne "$TEST_TEXT_FILE_LENGTH" ]
then
echo "error: expected $TEST_TEXT_FILE_LENGTH , got $FILE_LENGTH"
return 1
fi
check_file_size "${TEST_TEXT_FILE}" $(($TEST_TEXT_FILE_LENGTH * $(echo $TEST_INPUT | wc -c)))
rm_test_file
}
@ -32,15 +28,24 @@ function test_truncate_file {
# Truncate file to 0 length. This should trigger open(path, O_RDWR | O_TRUNC...)
: > ${TEST_TEXT_FILE}
# Verify file is zero length
if [ -s ${TEST_TEXT_FILE} ]
then
echo "error: expected ${TEST_TEXT_FILE} to be zero length"
return 1
fi
check_file_size "${TEST_TEXT_FILE}" 0
rm_test_file
}
function test_truncate_upload {
describe "Testing truncate file for uploading ..."
# A file of this size triggers both multipart and mix uploads.
# We test both of these cases.
rm_test_file ${BIG_FILE}
truncate ${BIG_FILE} -s ${BIG_FILE_LENGTH}
rm_test_file ${BIG_FILE}
}
function test_truncate_empty_file {
describe "Testing truncate empty file ..."
# Write an empty test file
@ -50,13 +55,8 @@ function test_truncate_empty_file {
t_size=1024
truncate ${TEST_TEXT_FILE} -s $t_size
# Verify file is zero length
size=$(stat -c %s ${TEST_TEXT_FILE})
if [ $t_size -ne $size ]
then
echo "error: expected ${TEST_TEXT_FILE} to be $t_size length, got $size"
return 1
fi
check_file_size "${TEST_TEXT_FILE}" $t_size
rm_test_file
}
@ -77,6 +77,9 @@ function test_mv_file {
# create the test file again
mk_test_file
# save file length
ALT_TEXT_LENGTH=`wc -c $TEST_TEXT_FILE | awk '{print $1}'`
#rename the test file
mv $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
if [ ! -e $ALT_TEST_TEXT_FILE ]
@ -84,9 +87,14 @@ function test_mv_file {
echo "Could not move file"
return 1
fi
#check the renamed file content-type
if [ -f "/etc/mime.types" ]
then
check_content_type "$1/$ALT_TEST_TEXT_FILE" "text/plain"
fi
# Check the contents of the alt file
ALT_TEXT_LENGTH=`echo $TEST_TEXT | wc -c | awk '{print $1}'`
ALT_FILE_LENGTH=`wc -c $ALT_TEST_TEXT_FILE | awk '{print $1}'`
if [ "$ALT_FILE_LENGTH" -ne "$ALT_TEXT_LENGTH" ]
then
@ -98,7 +106,7 @@ function test_mv_file {
rm_test_file $ALT_TEST_TEXT_FILE
}
function test_mv_directory {
function test_mv_empty_directory {
describe "Testing mv directory function ..."
if [ -e $TEST_DIR ]; then
echo "Unexpected, this file/directory exists: ${TEST_DIR}"
@ -108,7 +116,6 @@ function test_mv_directory {
mk_test_dir
mv ${TEST_DIR} ${TEST_DIR}_rename
if [ ! -d "${TEST_DIR}_rename" ]; then
echo "Directory ${TEST_DIR} was not renamed"
return 1
@ -121,6 +128,30 @@ function test_mv_directory {
fi
}
function test_mv_nonempty_directory {
describe "Testing mv directory function ..."
if [ -e $TEST_DIR ]; then
echo "Unexpected, this file/directory exists: ${TEST_DIR}"
return 1
fi
mk_test_dir
touch ${TEST_DIR}/file
mv ${TEST_DIR} ${TEST_DIR}_rename
if [ ! -d "${TEST_DIR}_rename" ]; then
echo "Directory ${TEST_DIR} was not renamed"
return 1
fi
rm -r ${TEST_DIR}_rename
if [ -e "${TEST_DIR}_rename" ]; then
echo "Could not remove the test directory, it still exists: ${TEST_DIR}_rename"
return 1
fi
}
function test_redirects {
describe "Testing redirects ..."
@ -162,7 +193,7 @@ function test_redirects {
}
function test_mkdir_rmdir {
describe "Testing creation/removal of a directory"
describe "Testing creation/removal of a directory ..."
if [ -e $TEST_DIR ]; then
echo "Unexpected, this file/directory exists: ${TEST_DIR}"
@ -179,12 +210,13 @@ function test_chmod {
# create the test file again
mk_test_file
ORIGINAL_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
ORIGINAL_PERMISSIONS=$(get_permissions $TEST_TEXT_FILE)
chmod 777 $TEST_TEXT_FILE;
# if they're the same, we have a problem.
if [ $(stat --format=%a $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
CHANGED_PERMISSIONS=$(get_permissions $TEST_TEXT_FILE)
if [ $CHANGED_PERMISSIONS == $ORIGINAL_PERMISSIONS ]
then
echo "Could not modify $TEST_TEXT_FILE permissions"
return 1
@ -200,12 +232,28 @@ function test_chown {
# create the test file again
mk_test_file
ORIGINAL_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
if [ `uname` = "Darwin" ]; then
ORIGINAL_PERMISSIONS=$(stat -f "%u:%g" $TEST_TEXT_FILE)
else
ORIGINAL_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
fi
chown 1000:1000 $TEST_TEXT_FILE;
# [NOTE]
# Prevent test interruption due to permission errors, etc.
# If the chown command fails, the unchanged ownership is caught by
# the comparison below, so the chown error itself can be ignored.
# '|| true' was added because of a problem with Travis CI on macOS
# and the ensure_diskfree option.
#
chown 1000:1000 $TEST_TEXT_FILE || true
# if they're the same, we have a problem.
if [ $(stat --format=%u:%g $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
if [ `uname` = "Darwin" ]; then
CHANGED_PERMISSIONS=$(stat -f "%u:%g" $TEST_TEXT_FILE)
else
CHANGED_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
fi
if [ $CHANGED_PERMISSIONS == $ORIGINAL_PERMISSIONS ]
then
if [ $ORIGINAL_PERMISSIONS == "1000:1000" ]
then
@ -221,7 +269,7 @@ function test_chown {
}
function test_list {
describe "Testing list"
describe "Testing list ..."
mk_test_file
mk_test_dir
@ -236,14 +284,47 @@ function test_list {
}
function test_remove_nonempty_directory {
describe "Testing removing a non-empty directory"
describe "Testing removing a non-empty directory ..."
mk_test_dir
touch "${TEST_DIR}/file"
rmdir "${TEST_DIR}" 2>&1 | grep -q "Directory not empty"
(
set +o pipefail
rmdir "${TEST_DIR}" 2>&1 | grep -q "Directory not empty"
)
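# [NOTE] The subshell with "set +o pipefail" keeps the expected rmdir
# failure from aborting the suite, which runs under errexit and pipefail.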
rm "${TEST_DIR}/file"
rm_test_dir
}
function test_external_directory_creation {
describe "Test external directory creation ..."
OBJECT_NAME="$(basename $PWD)/directory/${TEST_TEXT_FILE}"
echo "data" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
ls | grep directory
get_permissions directory | grep ^750$
ls directory
cmp <(echo "data") directory/${TEST_TEXT_FILE}
rm -f directory/${TEST_TEXT_FILE}
}
function test_external_modification {
describe "Test external modification to an object ..."
echo "old" > ${TEST_TEXT_FILE}
OBJECT_NAME="$(basename $PWD)/${TEST_TEXT_FILE}"
sleep 2
echo "new new" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
cmp ${TEST_TEXT_FILE} <(echo "new new")
rm -f ${TEST_TEXT_FILE}
}
function test_read_external_object() {
describe "create objects via aws CLI and read via s3fs ..."
OBJECT_NAME="$(basename $PWD)/${TEST_TEXT_FILE}"
sleep 3
echo "test" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
cmp ${TEST_TEXT_FILE} <(echo "test")
rm -f ${TEST_TEXT_FILE}
}
function test_rename_before_close {
describe "Testing rename before close ..."
(
@ -262,6 +343,7 @@ function test_rename_before_close {
function test_multipart_upload {
describe "Testing multi-part upload ..."
dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
@ -278,6 +360,7 @@ function test_multipart_upload {
function test_multipart_copy {
describe "Testing multi-part copy ..."
dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
mv "${BIG_FILE}" "${BIG_FILE}-copy"
@ -289,18 +372,109 @@ function test_multipart_copy {
return 1
fi
#check the renamed file content-type
check_content_type "$1/${BIG_FILE}-copy" "application/octet-stream"
rm -f "/tmp/${BIG_FILE}"
rm_test_file "${BIG_FILE}-copy"
}
function test_multipart_mix {
describe "Testing multi-part mix ..."
if [ `uname` = "Darwin" ]; then
cat /dev/null > $BIG_FILE
fi
dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH seek=0 count=1
dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH seek=0 count=1
# (1) Edit the middle of an existing file
# modify directly(seek 7.5MB offset)
# With nomultipart and nocopyapi this case is not meaningful, but
# the file is copied anyway because copying leaves no cache behind.
#
cp /tmp/${BIG_FILE} /tmp/${BIG_FILE}-mix
cp ${BIG_FILE} ${BIG_FILE}-mix
MODIFY_START_BLOCK=$((15*1024*1024/2/4))
echo -n "0123456789ABCDEF" | dd of="${BIG_FILE}-mix" bs=4 count=4 seek=$MODIFY_START_BLOCK conv=notrunc
echo -n "0123456789ABCDEF" | dd of="/tmp/${BIG_FILE}-mix" bs=4 count=4 seek=$MODIFY_START_BLOCK conv=notrunc
# Verify contents of file
echo "Comparing test file (1)"
if ! cmp "/tmp/${BIG_FILE}-mix" "${BIG_FILE}-mix"
then
return 1
fi
# (2) Write to an area larger than the size of the existing file
# modify directly(over file end offset)
#
cp /tmp/${BIG_FILE} /tmp/${BIG_FILE}-mix
cp ${BIG_FILE} ${BIG_FILE}-mix
OVER_FILE_BLOCK_POS=$((26*1024*1024/4))
echo -n "0123456789ABCDEF" | dd of="${BIG_FILE}-mix" bs=4 count=4 seek=$OVER_FILE_BLOCK_POS conv=notrunc
echo -n "0123456789ABCDEF" | dd of="/tmp/${BIG_FILE}-mix" bs=4 count=4 seek=$OVER_FILE_BLOCK_POS conv=notrunc
# Verify contents of file
echo "Comparing test file (2)"
if ! cmp "/tmp/${BIG_FILE}-mix" "${BIG_FILE}-mix"
then
return 1
fi
# (3) Writing from the 0th byte
#
cp /tmp/${BIG_FILE} /tmp/${BIG_FILE}-mix
cp ${BIG_FILE} ${BIG_FILE}-mix
echo -n "0123456789ABCDEF" | dd of="${BIG_FILE}-mix" bs=4 count=4 seek=0 conv=notrunc
echo -n "0123456789ABCDEF" | dd of="/tmp/${BIG_FILE}-mix" bs=4 count=4 seek=0 conv=notrunc
# Verify contents of file
echo "Comparing test file (3)"
if ! cmp "/tmp/${BIG_FILE}-mix" "${BIG_FILE}-mix"
then
return 1
fi
# (4) Write to the area within 5MB from the top
# modify directly(seek 1MB offset)
#
cp /tmp/${BIG_FILE} /tmp/${BIG_FILE}-mix
cp ${BIG_FILE} ${BIG_FILE}-mix
MODIFY_START_BLOCK=$((1*1024*1024))
echo -n "0123456789ABCDEF" | dd of="${BIG_FILE}-mix" bs=4 count=4 seek=$MODIFY_START_BLOCK conv=notrunc
echo -n "0123456789ABCDEF" | dd of="/tmp/${BIG_FILE}-mix" bs=4 count=4 seek=$MODIFY_START_BLOCK conv=notrunc
# Verify contents of file
echo "Comparing test file (4)"
if ! cmp "/tmp/${BIG_FILE}-mix" "${BIG_FILE}-mix"
then
return 1
fi
rm -f "/tmp/${BIG_FILE}"
rm -f "/tmp/${BIG_FILE}-mix"
rm_test_file "${BIG_FILE}"
rm_test_file "${BIG_FILE}-mix"
}
function test_special_characters {
describe "Testing special characters ..."
ls 'special' 2>&1 | grep -q 'No such file or directory'
ls 'special?' 2>&1 | grep -q 'No such file or directory'
ls 'special*' 2>&1 | grep -q 'No such file or directory'
ls 'special~' 2>&1 | grep -q 'No such file or directory'
ls 'specialµ' 2>&1 | grep -q 'No such file or directory'
(
set +o pipefail
ls 'special' 2>&1 | grep -q 'No such file or directory'
ls 'special?' 2>&1 | grep -q 'No such file or directory'
ls 'special*' 2>&1 | grep -q 'No such file or directory'
ls 'special~' 2>&1 | grep -q 'No such file or directory'
ls 'specialµ' 2>&1 | grep -q 'No such file or directory'
)
mkdir "TOYOTA TRUCK 8.2.2"
}
function test_symlink {
@ -317,30 +491,31 @@ function test_symlink {
[ -L $ALT_TEST_TEXT_FILE ]
[ ! -f $ALT_TEST_TEXT_FILE ]
rm -f $ALT_TEST_TEXT_FILE
}
function test_extended_attributes {
command -v setfattr >/dev/null 2>&1 || \
{ echo "Skipping extended attribute tests" ; return; }
describe "Testing extended attributes ..."
rm -f $TEST_TEXT_FILE
touch $TEST_TEXT_FILE
# set value
setfattr -n key1 -v value1 $TEST_TEXT_FILE
getfattr -n key1 --only-values $TEST_TEXT_FILE | grep -q '^value1$'
set_xattr key1 value1 $TEST_TEXT_FILE
get_xattr key1 $TEST_TEXT_FILE | grep -q '^value1$'
# append value
setfattr -n key2 -v value2 $TEST_TEXT_FILE
getfattr -n key1 --only-values $TEST_TEXT_FILE | grep -q '^value1$'
getfattr -n key2 --only-values $TEST_TEXT_FILE | grep -q '^value2$'
set_xattr key2 value2 $TEST_TEXT_FILE
get_xattr key1 $TEST_TEXT_FILE | grep -q '^value1$'
get_xattr key2 $TEST_TEXT_FILE | grep -q '^value2$'
# remove value
setfattr -x key1 $TEST_TEXT_FILE
! getfattr -n key1 --only-values $TEST_TEXT_FILE
getfattr -n key2 --only-values $TEST_TEXT_FILE | grep -q '^value2$'
del_xattr key1 $TEST_TEXT_FILE
! get_xattr key1 $TEST_TEXT_FILE
get_xattr key2 $TEST_TEXT_FILE | grep -q '^value2$'
rm_test_file
}
function test_mtime_file {
@ -364,17 +539,71 @@ function test_mtime_file {
#copy the test file with preserve mode
cp -p $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
testmtime=`stat -c %Y $TEST_TEXT_FILE`
altmtime=`stat -c %Y $ALT_TEST_TEXT_FILE`
testmtime=`get_mtime $TEST_TEXT_FILE`
altmtime=`get_mtime $ALT_TEST_TEXT_FILE`
if [ "$testmtime" -ne "$altmtime" ]
then
echo "File times do not match: $testmtime != $altmtime"
return 1
fi
rm_test_file
rm_test_file $ALT_TEST_TEXT_FILE
}
function test_update_time() {
describe "Testing update time function ..."
# create the test file
mk_test_file
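# [NOTE] POSIX semantics exercised below: chmod/chown/setxattr update
# ctime but leave mtime unchanged, while appending data updates both.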
ctime=`get_ctime $TEST_TEXT_FILE`
mtime=`get_mtime $TEST_TEXT_FILE`
sleep 2
chmod +x $TEST_TEXT_FILE
ctime2=`get_ctime $TEST_TEXT_FILE`
mtime2=`get_mtime $TEST_TEXT_FILE`
if [ $ctime -eq $ctime2 -o $mtime -ne $mtime2 ]; then
echo "Expected updated ctime: $ctime != $ctime2 and same mtime: $mtime == $mtime2"
return 1
fi
sleep 2
chown $UID $TEST_TEXT_FILE
ctime3=`get_ctime $TEST_TEXT_FILE`
mtime3=`get_mtime $TEST_TEXT_FILE`
if [ $ctime2 -eq $ctime3 -o $mtime2 -ne $mtime3 ]; then
echo "Expected updated ctime: $ctime2 != $ctime3 and same mtime: $mtime2 == $mtime3"
return 1
fi
sleep 2
set_xattr key value $TEST_TEXT_FILE
ctime4=`get_ctime $TEST_TEXT_FILE`
mtime4=`get_mtime $TEST_TEXT_FILE`
if [ $ctime3 -eq $ctime4 -o $mtime3 -ne $mtime4 ]; then
echo "Expected updated ctime: $ctime3 != $ctime4 and same mtime: $mtime3 == $mtime4"
return 1
fi
sleep 2
echo foo >> $TEST_TEXT_FILE
ctime5=`get_ctime $TEST_TEXT_FILE`
mtime5=`get_mtime $TEST_TEXT_FILE`
if [ $ctime4 -eq $ctime5 -o $mtime4 -eq $mtime5 ]; then
echo "Expected updated ctime: $ctime4 != $ctime5 and updated mtime: $mtime4 != $mtime5"
return 1
fi
rm_test_file
}
function test_rm_rf_dir {
describe "Test that rm -rf will remove directory with contents"
describe "Test that rm -rf will remove directory with contents ..."
# Create a dir with some files and directories
mkdir dir1
mkdir dir1/dir2
@ -390,35 +619,368 @@ function test_rm_rf_dir {
fi
}
function test_write_after_seek_ahead {
describe "Test writes succeed after a seek ahead"
dd if=/dev/zero of=testfile seek=1 count=1 bs=1024
rm testfile
function test_copy_file {
describe "Test simple copy ..."
dd if=/dev/urandom of=/tmp/simple_file bs=1024 count=1
cp /tmp/simple_file copied_simple_file
cmp /tmp/simple_file copied_simple_file
rm_test_file /tmp/simple_file
rm_test_file copied_simple_file
}
function test_write_after_seek_ahead {
describe "Test writes succeed after a seek ahead ..."
dd if=/dev/zero of=testfile seek=1 count=1 bs=1024
rm_test_file testfile
}
function test_overwrite_existing_file_range {
describe "Test overwrite range succeeds ..."
dd if=<(seq 1000) of=${TEST_TEXT_FILE}
dd if=/dev/zero of=${TEST_TEXT_FILE} seek=1 count=1 bs=1024 conv=notrunc
cmp ${TEST_TEXT_FILE} <(
seq 1000 | head -c 1024
dd if=/dev/zero count=1 bs=1024
seq 1000 | tail -c +2049
)
rm_test_file
}
function test_concurrency {
describe "Test concurrent updates to a directory ..."
for i in `seq 5`; do echo foo > $i; done
for process in `seq 10`; do
for i in `seq 5`; do
file=$(ls `seq 5` | sed -n "$(($RANDOM % 5 + 1))p")
cat $file >/dev/null || true
rm -f $file
echo foo > $file || true
done &
done
wait
rm -f `seq 5`
}
function test_concurrent_writes {
describe "Test concurrent updates to a file ..."
dd if=/dev/urandom of=${TEST_TEXT_FILE} bs=$BIG_FILE_LENGTH count=1
for process in `seq 10`; do
dd if=/dev/zero of=${TEST_TEXT_FILE} seek=$(($RANDOM % $BIG_FILE_LENGTH)) count=1 bs=1024 conv=notrunc &
done
wait
rm_test_file
}
function test_open_second_fd {
describe "read from an open fd ..."
rm_test_file second_fd_file
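# Write "foo" through one descriptor while a second descriptor reads the
# file's size; the size ("foo\n" = 4 bytes) goes to stderr and is captured.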
RESULT=$( (echo foo ; wc -c < second_fd_file >&2) 2>&1 1>second_fd_file)
if [ "$RESULT" -ne 4 ]; then
echo "size mismatch, expected: 4, was: ${RESULT}"
return 1
fi
rm_test_file second_fd_file
}
function test_write_multiple_offsets {
describe "test writing to multiple offsets ..."
../../write_multiple_offsets.py ${TEST_TEXT_FILE} 1024 1 $((16 * 1024 * 1024)) 1 $((18 * 1024 * 1024)) 1
rm_test_file ${TEST_TEXT_FILE}
}
function test_write_multiple_offsets_backwards {
describe "test writing to multiple offsets ..."
../../write_multiple_offsets.py ${TEST_TEXT_FILE} $((20 * 1024 * 1024 + 1)) 1 $((10 * 1024 * 1024)) 1
rm_test_file ${TEST_TEXT_FILE}
}
function test_clean_up_cache() {
describe "Test clean up cache ..."
dir="many_files"
count=25
mkdir -p $dir
for x in $(seq $count); do
dd if=/dev/urandom of=$dir/file-$x bs=10485760 count=1
done
file_cnt=$(ls $dir | wc -l)
if [ $file_cnt != $count ]; then
echo "Expected $count files but got $file_cnt"
rm -rf $dir
return 1
fi
CACHE_DISK_AVAIL_SIZE=`get_disk_avail_size $CACHE_DIR`
if [ "$CACHE_DISK_AVAIL_SIZE" -lt "$ENSURE_DISKFREE_SIZE" ];then
echo "Cache disk avail size:$CACHE_DISK_AVAIL_SIZE less than ensure_diskfree size:$ENSURE_DISKFREE_SIZE"
rm -rf $dir
return 1
fi
rm -rf $dir
}
function test_content_type() {
describe "Test Content-Type detection ..."
DIR_NAME="$(basename $PWD)"
touch "test.txt"
CONTENT_TYPE=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "${DIR_NAME}/test.txt" | grep "ContentType")
if ! echo $CONTENT_TYPE | grep -q "text/plain"; then
echo "Unexpected Content-Type: $CONTENT_TYPE"
return 1;
fi
touch "test.jpg"
CONTENT_TYPE=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "${DIR_NAME}/test.jpg" | grep "ContentType")
if ! echo $CONTENT_TYPE | grep -q "image/jpeg"; then
echo "Unexpected Content-Type: $CONTENT_TYPE"
return 1;
fi
touch "test.bin"
CONTENT_TYPE=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "${DIR_NAME}/test.bin" | grep "ContentType")
if ! echo $CONTENT_TYPE | grep -q "application/octet-stream"; then
echo "Unexpected Content-Type: $CONTENT_TYPE"
return 1;
fi
mkdir "test.dir"
CONTENT_TYPE=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "${DIR_NAME}/test.dir/" | grep "ContentType")
if ! echo $CONTENT_TYPE | grep -q "application/x-directory"; then
echo "Unexpected Content-Type: $CONTENT_TYPE"
return 1;
fi
}
# create more files than -o max_stat_cache_size
function test_truncate_cache() {
describe "Test make cache files over max cache file size ..."
for dir in $(seq 2); do
mkdir $dir
for file in $(seq 75); do
touch $dir/$file
done
ls $dir
done
}
function test_cache_file_stat() {
describe "Test cache file stat ..."
dd if=/dev/urandom of="${BIG_FILE}" bs=${BIG_FILE_LENGTH} count=1
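# [NOTE] Cache stat file layout assumed from the checks below: the first
# line is "<inode>:<file size>" and each following line is a page entry
# "<offset>:<size>:<loaded>:<modified>".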
#
# get "testrun-xxx" directory name
#
CACHE_TESTRUN_DIR=$(ls -1 ${CACHE_DIR}/${TEST_BUCKET_1}/ 2>/dev/null | grep testrun 2>/dev/null)
#
# get cache file inode number
#
CACHE_FILE_INODE=$(ls -i ${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${BIG_FILE} 2>/dev/null | awk '{print $1}')
if [ -z ${CACHE_FILE_INODE} ]; then
echo "Not found cache file or failed to get inode: ${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${BIG_FILE}"
return 1;
fi
#
# get lines from cache stat file
#
CACHE_FILE_STAT_LINE_1=$(sed -n 1p ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${BIG_FILE})
CACHE_FILE_STAT_LINE_2=$(sed -n 2p ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${BIG_FILE})
if [ -z ${CACHE_FILE_STAT_LINE_1} ] || [ -z ${CACHE_FILE_STAT_LINE_2} ]; then
echo "could not get first or second line from cache file stat: ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${BIG_FILE}"
return 1;
fi
#
# compare
#
if [ "${CACHE_FILE_STAT_LINE_1}" != "${CACHE_FILE_INODE}:${BIG_FILE_LENGTH}" ]; then
echo "first line(cache file stat) is different: \"${CACHE_FILE_STAT_LINE_1}\" != \"${CACHE_FILE_INODE}:${BIG_FILE_LENGTH}\""
return 1;
fi
if [ "${CACHE_FILE_STAT_LINE_2}" != "0:${BIG_FILE_LENGTH}:1:0" ]; then
echo "last line(cache file stat) is different: \"${CACHE_FILE_STAT_LINE_2}\" != \"0:${BIG_FILE_LENGTH}:1:0\""
return 1;
fi
#
# remove cache files directly
#
rm -f ${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${BIG_FILE}
rm -f ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${BIG_FILE}
#
# write a byte into the middle(not the boundary) of the file
#
CHECK_UPLOAD_OFFSET=$((10 * 1024 * 1024 + 17))
dd if=/dev/urandom of="${BIG_FILE}" bs=1 count=1 seek=${CHECK_UPLOAD_OFFSET} conv=notrunc
#
# get cache file inode number
#
CACHE_FILE_INODE=$(ls -i ${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${BIG_FILE} 2>/dev/null | awk '{print $1}')
if [ -z ${CACHE_FILE_INODE} ]; then
echo "Not found cache file or failed to get inode: ${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${BIG_FILE}"
return 1;
fi
#
# get lines from cache stat file
#
CACHE_FILE_STAT_LINE_1=$(sed -n 1p ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${BIG_FILE})
CACHE_FILE_STAT_LINE_E=$(tail -1 ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${BIG_FILE} 2>/dev/null)
if [ -z ${CACHE_FILE_STAT_LINE_1} ] || [ -z ${CACHE_FILE_STAT_LINE_E} ]; then
echo "could not get first or end line from cache file stat: ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${BIG_FILE}"
return 1;
fi
#
# check first and cache file length from last line
#
# We should check every stat line, but the values can differ between
# platforms, so instead the total cache file size is calculated from
# the last line and compared.
#
CACHE_LAST_OFFSET=$(echo ${CACHE_FILE_STAT_LINE_E} | cut -d ":" -f1)
CACHE_LAST_SIZE=$(echo ${CACHE_FILE_STAT_LINE_E} | cut -d ":" -f2)
CACHE_TOTAL_SIZE=$((${CACHE_LAST_OFFSET} + ${CACHE_LAST_SIZE}))
if [ "${CACHE_FILE_STAT_LINE_1}" != "${CACHE_FILE_INODE}:${BIG_FILE_LENGTH}" ]; then
echo "first line(cache file stat) is different: \"${CACHE_FILE_STAT_LINE_1}\" != \"${CACHE_FILE_INODE}:${BIG_FILE_LENGTH}\""
return 1;
fi
if [ ${BIG_FILE_LENGTH} -ne ${CACHE_TOTAL_SIZE} ]; then
echo "the file size indicated by the cache stat file is different: \"${BIG_FILE_LENGTH}\" != \"${CACHE_TOTAL_SIZE}\""
return 1;
fi
rm_test_file "${BIG_FILE}"
}
function test_upload_sparsefile {
describe "Testing upload sparse file ..."
rm_test_file ${BIG_FILE}
rm -f /tmp/${BIG_FILE}
#
# Make a file that is entirely a hole (fully sparse)
#
truncate ${BIG_FILE} -s ${BIG_FILE_LENGTH}
#
# Write some bytes at about the middle of the file
# (deliberately not aligned to block boundaries)
#
WRITE_POS=$((${BIG_FILE_LENGTH} / 2 - 128))
echo -n "0123456789ABCDEF" | dd of="/tmp/${BIG_FILE}" bs=1 count=16 seek=${WRITE_POS} conv=notrunc
#
# copy(upload) the file
#
cp /tmp/${BIG_FILE} ${BIG_FILE}
#
# check
#
cmp /tmp/${BIG_FILE} ${BIG_FILE}
rm_test_file ${BIG_FILE}
rm -f /tmp/${BIG_FILE}
}
function test_mix_upload_entities() {
describe "Testing upload sparse files ..."
#
# Make test file
#
dd if=/dev/urandom of=${BIG_FILE} bs=$BIG_FILE_LENGTH count=1
#
# If the cache option is enabled, delete the cache of uploaded files.
#
if [ -f ${CACHE_DIR}/${TEST_BUCKET_1}/${BIG_FILE} ]; then
rm -f ${CACHE_DIR}/${TEST_BUCKET_1}/${BIG_FILE}
fi
if [ -f ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${BIG_FILE} ]; then
rm -f ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${BIG_FILE}
fi
#
# Do a partial write to the file.
#
echo -n "0123456789ABCDEF" | dd of=${BIG_FILE} bs=1 count=16 seek=0 conv=notrunc
echo -n "0123456789ABCDEF" | dd of=${BIG_FILE} bs=1 count=16 seek=8192 conv=notrunc
echo -n "0123456789ABCDEF" | dd of=${BIG_FILE} bs=1 count=16 seek=1073152 conv=notrunc
echo -n "0123456789ABCDEF" | dd of=${BIG_FILE} bs=1 count=16 seek=26214400 conv=notrunc
echo -n "0123456789ABCDEF" | dd of=${BIG_FILE} bs=1 count=16 seek=26222592 conv=notrunc
rm_test_file "${BIG_FILE}"
}
function test_ut_ossfs {
describe "Testing ossfs python ut..."
export TEST_BUCKET_MOUNT_POINT=$TEST_BUCKET_MOUNT_POINT_1
../../ut_test.py
}
function add_all_tests {
add_tests test_append_file
add_tests test_truncate_file
if ! ps u $S3FS_PID | grep -q ensure_diskfree && ! uname | grep -q Darwin; then
add_tests test_clean_up_cache
fi
add_tests test_append_file
add_tests test_truncate_file
add_tests test_truncate_upload
add_tests test_truncate_empty_file
add_tests test_mv_file
add_tests test_mv_directory
add_tests test_mv_empty_directory
add_tests test_mv_nonempty_directory
add_tests test_redirects
add_tests test_mkdir_rmdir
add_tests test_chmod
add_tests test_chown
add_tests test_list
add_tests test_remove_nonempty_directory
# TODO: broken: https://github.com/s3fs-fuse/s3fs-fuse/issues/145
#add_tests test_rename_before_close
if ! ps u $S3FS_PID | grep -q notsup_compat_dir; then
# TODO: investigate why notsup_compat_dir fails
add_tests test_external_directory_creation
fi
add_tests test_external_modification
add_tests test_read_external_object
add_tests test_rename_before_close
add_tests test_multipart_upload
add_tests test_multipart_copy
add_tests test_multipart_mix
add_tests test_special_characters
add_tests test_symlink
add_tests test_extended_attributes
add_tests test_mtime_file
add_tests test_update_time
add_tests test_rm_rf_dir
add_tests test_copy_file
add_tests test_write_after_seek_ahead
add_tests test_overwrite_existing_file_range
add_tests test_concurrency
add_tests test_concurrent_writes
add_tests test_open_second_fd
add_tests test_write_multiple_offsets
add_tests test_write_multiple_offsets_backwards
add_tests test_content_type
add_tests test_truncate_cache
add_tests test_upload_sparsefile
add_tests test_mix_upload_entities
add_tests test_ut_ossfs
if `ps -ef | grep -v grep | grep s3fs | grep -q use_cache`; then
add_tests test_cache_file_stat
fi
}
init_suite

test/keystore.jks (new binary file, not shown)


@ -7,12 +7,12 @@
###
### UsageFunction <program name>
###
UsageFuntion()
UsageFunction()
{
echo "Usage: $1 [-h] [-y] [-all] <base directory>"
echo " -h print usage"
echo " -y no confirm"
echo " -all force all directoris"
echo " -all force all directories"
echo " There is no -all option is only to merge for other S3 client."
echo " If -all is specified, this shell script merge all directory"
echo " for s3fs old version."
@ -28,7 +28,7 @@ DIRPARAM=""
while [ "$1" != "" ]; do
if [ "X$1" = "X-help" -o "X$1" = "X-h" -o "X$1" = "X-H" ]; then
UsageFuntion $OWNNAME
UsageFunction $OWNNAME
exit 0
elif [ "X$1" = "X-y" -o "X$1" = "X-Y" ]; then
AUTOYES="yes"
@ -38,7 +38,7 @@ while [ "$1" != "" ]; do
if [ "X$DIRPARAM" != "X" ]; then
echo "*** Input error."
echo ""
UsageFuntion $OWNNAME
UsageFunction $OWNNAME
exit 1
fi
DIRPARAM=$1
@ -48,7 +48,7 @@ done
if [ "X$DIRPARAM" = "X" ]; then
echo "*** Input error."
echo ""
UsageFuntion $OWNNAME
UsageFunction $OWNNAME
exit 1
fi
@ -62,7 +62,7 @@ fi
echo "#############################################################################"
echo "[CAUTION]"
echo "This program merges a directory made in s3fs which is older than version 1.64."
echo "And made in other S3 client appilication."
echo "And made in other S3 client application."
echo "This program may be have bugs which are not fixed yet."
echo "Please execute this program by responsibility of your own."
echo "#############################################################################"
@ -104,7 +104,7 @@ for DIR in $DIRLIST; do
if [ "$ALLYES" = "no" ]; then
### Skip "d---------" directories.
### Other clients make directory object "dir/" which don't have
### "x-amz-meta-mode" attribyte.
### "x-amz-meta-mode" attribute.
### These directories then appear as "d---------", which marks them as targets.
DIRPERMIT=`ls -ld --time-style=+'%Y%m%d%H%M' $DIR | awk '{print $1}'`
if [ "$DIRPERMIT" != "d---------" ]; then
@ -112,7 +112,7 @@ for DIR in $DIRLIST; do
fi
fi
### Comfirm
### Confirm
ANSWER=""
if [ "$AUTOYES" = "yes" ]; then
ANSWER="y"


@ -1,7 +1,9 @@
s3proxy.endpoint=http://127.0.0.1:8080
s3proxy.authorization=aws-v4
s3proxy.secure-endpoint=https://127.0.0.1:8080
s3proxy.authorization=aws-v2-or-v4
s3proxy.identity=local-identity
s3proxy.credential=local-credential
s3proxy.keystore-path=keystore.jks
s3proxy.keystore-password=password
jclouds.provider=transient
jclouds.identity=remote-identity


@ -34,60 +34,59 @@ if [ "X$1" = "X-h" -o "X$1" = "X-H" ]; then
fi
if [ "X$1" = "X" -o "X$2" = "X" -o "X$3" = "X" ]; then
func_usage $PRGNAME
exit -1
exit 1
fi
BUCKET=$1
CDIR=$2
CDIR="$2"
LIMIT=$3
SILENT=0
if [ "X$4" = "X-silent" ]; then
SILENT=1
fi
FILES_CDIR=$CDIR/$BUCKET
STATS_CDIR=$CDIR/\.$BUCKET\.stat
FILES_CDIR="${CDIR}/${BUCKET}"
STATS_CDIR="${CDIR}/.${BUCKET}.stat"
CURRENT_CACHE_SIZE=`du -sb "$FILES_CDIR" | awk '{print $1}'`
#
# Check total size
#
if [ $LIMIT -ge `du -sb $FILES_CDIR | awk '{print $1}'` ]; then
if [ $LIMIT -ge $CURRENT_CACHE_SIZE ]; then
if [ $SILENT -ne 1 ]; then
echo "$FILES_CDIR is below allowed $LIMIT"
echo "$FILES_CDIR ($CURRENT_CACHE_SIZE) is below allowed $LIMIT"
fi
exit 0
fi
#
# Make file list by sorted access time
#
ALL_STATS_ATIMELIST=`find $STATS_CDIR -type f -exec echo -n {} \; -exec echo -n " " \; -exec stat -c %X {} \; | awk '{print $2":"$1}' | sort`
#
# Remove loop
#
TMP_ATIME=0
TMP_STATS=""
TMP_CFILE=""
for part in $ALL_STATS_ATIMELIST; do
TMP_ATIME=`echo $part | sed 's/\:/ /' | awk '{print $1}'`
TMP_STATS=`echo $part | sed 's/\:/ /' | awk '{print $2}'`
TMP_CFILE=`echo $TMP_STATS | sed s/\.$BUCKET\.stat/$BUCKET/`
if [ `stat -c %X $TMP_STATS` -eq $TMP_ATIME ]; then
rm -f $TMP_STATS $TMP_CFILE > /dev/null 2>&1
#
# Make file list by sorted access time
#
find "$STATS_CDIR" -type f -exec stat -c "%X:%n" "{}" \; | sort | while read part
do
echo Looking at $part
TMP_ATIME=`echo "$part" | cut -d: -f1`
TMP_STATS="`echo "$part" | cut -d: -f2`"
TMP_CFILE=`echo "$TMP_STATS" | sed s/\.$BUCKET\.stat/$BUCKET/`
if [ `stat -c %X "$TMP_STATS"` -eq $TMP_ATIME ]; then
rm -f "$TMP_STATS" "$TMP_CFILE" > /dev/null 2>&1
if [ $? -ne 0 ]; then
if [ $SILENT -ne 1 ]; then
echo "ERROR: Could not remove files($TMP_STATS,$TMP_CFILE)"
fi
exit -1
exit 1
else
if [ $SILENT -ne 1 ]; then
echo "remove file: $TMP_CFILE $TMP_STATS"
fi
fi
fi
if [ $LIMIT -ge `du -sb $FILES_CDIR | awk '{print $1}'` ]; then
if [ $LIMIT -ge `du -sb "$FILES_CDIR" | awk '{print $1}'` ]; then
if [ $SILENT -ne 1 ]; then
echo "finish removing files"
fi
@ -96,7 +95,7 @@ for part in $ALL_STATS_ATIMELIST; do
done
if [ $SILENT -ne 1 ]; then
TOTAL_SIZE=`du -sb $FILES_CDIR | awk '{print $1}'`
TOTAL_SIZE=`du -sb "$FILES_CDIR" | awk '{print $1}'`
echo "Finish: $FILES_CDIR total size is $TOTAL_SIZE"
fi


@ -5,6 +5,7 @@
#
set -o errexit
set -o pipefail
# Require root
REQUIRE_ROOT=require-root.sh
@ -12,19 +13,53 @@ REQUIRE_ROOT=require-root.sh
source integration-test-common.sh
CACHE_DIR="/tmp/s3fs-cache"
rm -rf "${CACHE_DIR}"
mkdir "${CACHE_DIR}"
#reserve 200MB for data cache
source test-utils.sh
CACHE_DISK_AVAIL_SIZE=`get_disk_avail_size $CACHE_DIR`
if [ `uname` = "Darwin" ]; then
# [FIXME]
# On macOS only, there are cases where processes other than the
# s3fs cache consume disk space. We suspect Time Machine is the
# cause, but there is no workaround, so the s3fs cache is given
# about 1GB of extra headroom to bypass the error.
#
ENSURE_DISKFREE_SIZE=$((CACHE_DISK_AVAIL_SIZE - 1200))
else
ENSURE_DISKFREE_SIZE=$((CACHE_DISK_AVAIL_SIZE - 200))
fi
export CACHE_DIR
export ENSURE_DISKFREE_SIZE
FLAGS=(
"use_cache=${CACHE_DIR} -o ensure_diskfree=${ENSURE_DISKFREE_SIZE}"
enable_content_md5
enable_noobj_cache
max_stat_cache_size=100
nocopyapi
nomultipart
notsup_compat_dir
sigv2
singlepart_copy_limit=$((10 * 1024)) # limit size to exercise multipart code paths
#use_sse # TODO: S3Proxy does not support SSE
)
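# Each flag above gets its own s3fs mount and a full run of
# integration-test-main.sh in the loop below.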
start_s3proxy
#
# enable_content_md5
# Causes s3fs to validate file contents. This isn't included in the common
# options used by start_s3fs because tests may be performance tests
# singlepart_copy_limit
# Appeared in upstream s3fs-fuse tests, possibly a limitation of S3Proxy
# TODO: github archaeology to see why it was added.
#
start_s3fs -o enable_content_md5 \
-o singlepart_copy_limit=$((10 * 1024))
for flag in "${FLAGS[@]}"; do
echo "testing s3fs flag: $flag"
./integration-test-main.sh
start_s3fs -o $flag
./integration-test-main.sh
stop_s3fs
done
stop_s3proxy
echo "$0: tests complete."


@ -1,6 +1,9 @@
#!/bin/bash
#### Test utils
set -o errexit
set -o pipefail
# Configuration
TEST_TEXT="HELLO WORLD"
@ -12,6 +15,65 @@ BIG_FILE=big-file-s3fs.txt
BIG_FILE_LENGTH=$((25 * 1024 * 1024))
export RUN_DIR
if [ `uname` = "Darwin" ]; then
export SED_BUFFER_FLAG="-l"
else
export SED_BUFFER_FLAG="--unbuffered"
fi
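# (both flags make sed line-buffered: -l on macOS, --unbuffered on GNU,
# so prefixed s3fs log lines flush promptly)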
function get_xattr() {
if [ `uname` = "Darwin" ]; then
xattr -p "$1" "$2"
else
getfattr -n "$1" --only-values "$2"
fi
}
function set_xattr() {
if [ `uname` = "Darwin" ]; then
xattr -w "$1" "$2" "$3"
else
setfattr -n "$1" -v "$2" "$3"
fi
}
function del_xattr() {
if [ `uname` = "Darwin" ]; then
xattr -d "$1" "$2"
else
setfattr -x "$1" "$2"
fi
}
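# Example usage of the wrappers above (hypothetical key, value and file):
#
#   set_xattr mykey myvalue "${TEST_TEXT_FILE}"
#   get_xattr mykey "${TEST_TEXT_FILE}"    # prints "myvalue"
#   del_xattr mykey "${TEST_TEXT_FILE}"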
function get_size() {
if [ `uname` = "Darwin" ]; then
stat -f "%z" "$1"
else
stat -c %s "$1"
fi
}
function check_file_size() {
FILE_NAME="$1"
EXPECTED_SIZE="$2"
# Verify file size via metadata
size=$(get_size ${FILE_NAME})
if [ $size -ne $EXPECTED_SIZE ]
then
echo "error: expected ${FILE_NAME} to be $EXPECTED_SIZE length, got $size"
return 1
fi
# Verify file size via data
size=$(cat ${FILE_NAME} | wc -c)
if [ $size -ne $EXPECTED_SIZE ]
then
echo "error: expected ${FILE_NAME} to be $EXPECTED_SIZE length, got $size"
return 1
fi
}
function mk_test_file {
if [ $# == 0 ]; then
TEXT=$TEST_TEXT
@ -24,6 +86,21 @@ function mk_test_file {
echo "Could not create file ${TEST_TEXT_FILE}, it does not exist"
exit 1
fi
# wait & check
BASE_TEXT_LENGTH=`echo $TEXT | wc -c | awk '{print $1}'`
TRY_COUNT=10
while true; do
MK_TEXT_LENGTH=`wc -c $TEST_TEXT_FILE | awk '{print $1}'`
if [ $BASE_TEXT_LENGTH -eq $MK_TEXT_LENGTH ]; then
break
fi
TRY_COUNT=`expr $TRY_COUNT - 1`
if [ $TRY_COUNT -le 0 ]; then
echo "Could not create file ${TEST_TEXT_FILE}, that file size is something wrong"
fi
sleep 1
done
}
function rm_test_file {
@ -65,9 +142,10 @@ function cd_run_dir {
echo "TEST_BUCKET_MOUNT_POINT variable not set"
exit 1
fi
RUN_DIR=$(mktemp --directory ${TEST_BUCKET_MOUNT_POINT_1}/testrun-XXXXXX)
RUN_DIR=${TEST_BUCKET_MOUNT_POINT_1}/${1}
mkdir -p ${RUN_DIR}
cd ${RUN_DIR}
}
}
function clean_run_dir {
if [ -d ${RUN_DIR} ]; then
@ -105,7 +183,7 @@ function add_tests {
# Log test name and description
# describe [DESCRIPTION]
function describe {
echo "${FUNCNAME[1]}: "$@""
echo "${FUNCNAME[1]}: \"$*\""
}
# Runs each test in a suite and summarizes results. The list of
@ -114,7 +192,8 @@ function describe {
# made after the test run.
function run_suite {
orig_dir=$PWD
cd_run_dir
key_prefix="testrun-$RANDOM"
cd_run_dir $key_prefix
for t in "${TEST_LIST[@]}"; do
# The following sequence runs tests in a subshell to allow continuation
# on test failure, but still allowing errexit to be in effect during
@ -125,7 +204,7 @@ function run_suite {
# Other ways of trying to capture the return value will also disable
# errexit in the function due to bash... compliance with POSIX?
set +o errexit
(set -o errexit; $t)
(set -o errexit; $t $key_prefix)
if [[ $? == 0 ]]; then
report_pass $t
else
@ -154,3 +233,49 @@ function run_suite {
return 0
fi
}
function get_ctime() {
if [ `uname` = "Darwin" ]; then
stat -f "%c" "$1"
else
stat -c "%Z" "$1"
fi
}
function get_mtime() {
if [ `uname` = "Darwin" ]; then
stat -f "%m" "$1"
else
stat -c "%Y" "$1"
fi
}
function get_permissions() {
if [ `uname` = "Darwin" ]; then
stat -f "%p" "$1"
else
stat -c "%a" "$1"
fi
}
function check_content_type() {
INFO_STR=`aws_cli s3api head-object --bucket ${TEST_BUCKET_1} --key $1`
if [[ "${INFO_STR}" != *"$2"* ]]
then
echo "moved file content-type is not as expected expected:$2 got:${INFO_STR}"
exit 1
fi
}
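# Prints the free space of the filesystem containing $1, in MiB
# (both GNU and BSD df honor the BLOCKSIZE environment variable).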
function get_disk_avail_size() {
DISK_AVAIL_SIZE=`BLOCKSIZE=$((1024 * 1024)) df $1 | awk '{print $4}' | tail -n 1`
echo ${DISK_AVAIL_SIZE}
}
function aws_cli() {
FLAGS=""
if [ -n "${S3FS_PROFILE}" ]; then
FLAGS="--profile ${S3FS_PROFILE}"
fi
aws $* --endpoint-url "${S3_URL}" --no-verify-ssl $FLAGS
}
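# Example (hypothetical object key), matching how the tests call it:
#
#   echo "data" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/some/key"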

test/ut_test.py (new executable file, 81 lines)

@ -0,0 +1,81 @@
#!/usr/bin/env python2
import os
import unittest
import ConfigParser
import random
import sys
import time
class OssfsUnitTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def random_string(self, len):
char_set = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g']
list = []
for i in range(0, len):
list.append(random.choice(char_set))
return "".join(list)
def test_read_file(self):
filename = "%s" % (self.random_string(10))
print filename
f = open(filename, 'w')
data = self.random_string(1000)
f.write(data)
f.close()
f = open(filename, 'r')
data = f.read(100)
self.assertEqual(len(data), 100)
data = f.read(100)
self.assertEqual(len(data), 100)
f.close()
def test_rename_file(self):
filename1 = "%s" % (self.random_string(10))
filename2 = "%s" % (self.random_string(10))
print filename1, filename2
f = open(filename1, 'w+')
data1 = self.random_string(1000)
f.write(data1)
os.rename(filename1, filename2)
f.seek(0, 0)
data2 = f.read()
f.close()
self.assertEqual(len(data1), len(data2))
self.assertEqual(data1, data2)
def test_rename_file2(self):
filename1 = "%s" % (self.random_string(10))
filename2 = "%s" % (self.random_string(10))
print filename1, filename2
f = open(filename1, 'w')
data1 = self.random_string(1000)
f.write(data1)
f.close()
os.rename(filename1, filename2)
f = open(filename2, 'r')
f.seek(0, 0)
data2 = f.read()
f.close()
self.assertEqual(len(data1), len(data2))
self.assertEqual(data1, data2)
if __name__ == '__main__':
unittest.main()

test/write_multiple_offsets.py (new executable file, 18 lines)

@ -0,0 +1,18 @@
#!/usr/bin/env python2
import os
import sys
if len(sys.argv) < 4 or len(sys.argv) % 2 != 0:
sys.exit("Usage: %s OUTFILE OFFSET_1 SIZE_1 [OFFSET_N SIZE_N]...")
filename = sys.argv[1]
fd = os.open(filename, os.O_CREAT | os.O_TRUNC | os.O_WRONLY)
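# Each (OFFSET, SIZE) pair below seeks to OFFSET and writes SIZE bytes of
# "a", producing a sparse file with non-contiguous written regions.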
try:
for i in range(2, len(sys.argv), 2):
data = "a" * int(sys.argv[i+1])
os.lseek(fd, int(sys.argv[i]), os.SEEK_SET)
os.write(fd, data)
finally:
os.close(fd)