500 Commits

SHA1 Message Date
b0681246b9 Merge pull request #595 from ggtakec/master
Updated ChangeLog and configure.ac for release 1.81
2017-05-13 20:12:47 +09:00
52853f6b47 Updated ChangeLog and configure.ac for release 1.81 2017-05-13 11:02:54 +00:00
f6eb841a24 Updated ChangeLog and configure.ac for release 1.81 2017-05-13 10:54:54 +00:00
caea087aec Merge pull request #594 from ggtakec/master
Check the bucket when it is public and add the nocopyapi option automatically
2017-05-13 17:01:21 +09:00
d2ae14d8b7 Check the bucket when it is public and add the nocopyapi option automatically 2017-05-13 07:48:50 +00:00
7115835834 Check the bucket when it is public and add the nocopyapi option automatically 2017-05-13 07:35:55 +00:00
551c6acf67 Merge pull request #593 from ggtakec/master
Backward compatibility for changing the default transport to HTTPS
2017-05-13 16:09:22 +09:00
24df69f688 Backward compatibility for changing the default transport to HTTPS 2017-05-13 06:47:51 +00:00
23a10dd644 Merge pull request #590 from ggtakec/master
Updated man page for default_acl option - #567
2017-05-09 23:29:04 +09:00
034042f511 Updated man page for default_acl option - #567 2017-05-09 14:18:19 +00:00
465c15ef40 Merge pull request #588 from andrewgaul/https
Default transport to HTTPS
2017-05-09 23:04:57 +09:00
a22675bafd Merge pull request #567 from andrewgaul/default-acl
Do not send ACL unless overridden
2017-05-09 23:03:27 +09:00
0e0ae38f6d Default transport to HTTPS
This protects private data when used over the public Internet.  Users
can opt in to unencrypted HTTP if they need additional performance on
a local network.  Fixes #282.
2017-05-07 10:59:54 -07:00
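For illustration, opting back into plain HTTP after this change would use the existing url option (a sketch; bucket name and mount point are placeholders):

s3fs mybucket /mnt/s3 -o url=http://s3.amazonaws.com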
7b30d5d15b Do not send canned ACL header when empty string
Some providers such as StorageGRID do not support canned ACLs.
Setting to empty allows callers to omit the header.  References #125.
2017-05-07 10:52:31 -07:00
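A minimal sketch of omitting the header, assuming the default_acl option accepts an empty value as described (bucket and mount point are placeholders):

s3fs mybucket /mnt/s3 -o default_acl=""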
4a5c9bef89 Merge pull request #587 from ggtakec/master
Changed copyright year format for debian pkg
2017-05-07 20:33:38 +09:00
9d10a5aa70 Changed copyright year format for debian pkg 2017-05-07 11:24:17 +00:00
107757f11d Merge pull request #585 from ggtakec/master
Fixed failure to upload/copy with SSE_C and SSE_KMS
2017-05-07 18:56:17 +09:00
a12e0d5ec4 Fixed failure to upload/copy with SSE_C and SSE_KMS 2017-05-07 09:29:08 +00:00
42cdcbc2dc Merge pull request #583 from ggtakec/master
Updated the object size limit in the s3fs man page
2017-05-06 13:51:01 +09:00
eef549dac7 Updated the object size limit in the s3fs man page 2017-05-06 04:34:07 +00:00
c8ee132813 Merge pull request #582 from ggtakec/master
Check errors returned in 200 OK responses for put header request
2017-05-06 11:28:51 +09:00
d07c3f38b7 Check errors returned in 200 OK responses for put header request 2017-05-06 02:15:53 +00:00
73da168b93 Merge pull request #580 from ggtakec/master
Enhanced bucket/path parameter check
2017-05-06 05:13:05 +09:00
1fe0334c08 Enhanced bucket/path parameter check 2017-05-05 19:55:24 +00:00
7d09914f1f Merge pull request #579 from ggtakec/master
Added notsup_compat_dir option
2017-05-06 02:38:36 +09:00
3ac39d61f8 Added notsup_compat_dir option 2017-05-05 17:28:29 +00:00
c5677b4726 Merge pull request #578 from ggtakec/master
Refactored the get_object_attribute function
2017-05-05 19:24:01 +09:00
67685c3d49 Merge pull request #1 from ggtakec/test
Refactored the get_object_attribute function
2017-05-05 19:09:45 +09:00
864e20e1f2 Refactored the get_object_attribute function 2017-05-05 10:02:21 +00:00
51b3183cba Refactored the check_object_access function 2017-05-05 09:51:30 +00:00
f02b1bc352 Merge pull request #576 from ggtakec/master
Added an option to compensate for a missing stat mode
2017-05-04 13:12:47 +09:00
758b92e823 Added an option to compensate for a missing stat mode 2017-05-04 03:41:24 +00:00
df0ff3a2fd Merge pull request #556 from orozery/fix_nocache_multipart_upload
fix multipart upload handling without cache
2017-04-16 19:22:15 +09:00
edcf4c6218 Merge pull request #555 from orozery/dont_sign_empty_headers
don't sign empty headers (as they are discarded by libcurl)
2017-04-16 19:16:47 +09:00
28efff5986 Merge pull request #554 from orozery/cache_cleanup
cleanup cache directory when running out of disk space
2017-04-16 19:13:11 +09:00
efba9bcbc1 Merge pull request #553 from orozery/custom_cipher_suite
add TLS cipher suites customization
2017-04-16 19:09:27 +09:00
6bd179c92b Merge pull request #552 from orozery/foreground_threads
switch S3fsMultiCurl to use foreground threads
2017-04-16 19:05:16 +09:00
96764b7410 switch S3fsMultiCurl to use foreground threads 2017-04-09 16:56:49 +03:00
ff3eb1971f Merge branch 'master' into fix_nocache_multipart_upload 2017-04-09 22:13:33 +09:00
94ddcb8d4f Merge pull request #560 from ggtakec/master
Fixed multipart uploading when there is no free space, related to #509
2017-04-09 14:06:23 +09:00
b4c90d6957 Fixed a bug in multipart uploading when there is no free disk space, related to #509 2017-04-09 04:37:20 +00:00
75b59a7c16 switch S3fsMultiCurl to use foreground threads 2017-04-04 15:32:53 +03:00
3bcca75a88 don't sign empty headers (as they are discarded by libcurl) 2017-04-04 15:24:20 +03:00
79ea1a1561 Merge pull request #558 from ggtakec/master
Fixed a bug in the stat cache truncation logic
2017-04-02 21:02:15 +09:00
f0f61b3b55 Fixed a bug in the stat cache truncation logic 2017-04-02 11:51:58 +00:00
b955391621 Merge pull request #557 from ggtakec/master
Added check_cache_dir_exist option(refixed #347) - #538
2017-04-02 17:32:08 +09:00
8de992d42d Added check_cache_dir_exist option(refixed #347) - #538 2017-04-02 08:17:12 +00:00
fef3fbc225 Added check_cache_dir_exist option(refixed #347) - #538 2017-04-02 08:10:16 +00:00
acb61880b9 Merge pull request #551 from ggtakec/master
Updated stat_cache_expire option description - #545
2017-04-02 16:32:47 +09:00
8ee95ff7ab fix multipart upload handling without cache 2017-04-02 10:27:43 +03:00
95578cad43 cleanup cache directory when running out of disk space 2017-04-02 10:22:12 +03:00
465bbd3729 Updated stat_cache_expire option description - #545 2017-04-02 07:19:16 +00:00
0fa895594e Merge pull request #550 from Vascom/patch-1
Add umount instruction for unprivileged user.
2017-04-02 16:05:40 +09:00
15573cd21e Add umount instruction for unprivileged user.
An ordinary user has no permission to use the umount command, so it is helpful to add the FUSE unmount instruction to the man page.
2017-03-29 11:43:05 +03:00
43df94719b Merge pull request #546 from ggtakec/master
Fixed double initialization of the SSL library in foreground mode
2017-03-20 02:22:19 +09:00
980ba398bc Fixed double initialization of SSL library - #524 2017-03-19 17:11:18 +00:00
0d59ac51c1 Merge pull request #545 from ggtakec/master
Changed the base time used by the stat_cache_expire option - #523
2017-03-20 00:40:26 +09:00
523043a2aa Changed the base time used by the stat_cache_expire option - #523 2017-03-19 15:19:04 +00:00
277da2c64a Merge pull request #540 from andrewgaul/cppcheck
Address cppcheck 1.77 warnings
2017-03-19 15:27:34 +09:00
03217baa99 Address cppcheck 1.77 warnings 2017-03-06 12:41:08 -08:00
6affefff5b Merge pull request #509 from andrewgaul/mpu
Use server-provided ETag during complete upload
2017-03-06 21:54:51 +09:00
2506fe73fa Merge pull request #539 from andrewgaul/s3proxy
Upgrade to S3Proxy 1.5.2
2017-03-06 21:48:18 +09:00
25a03c370a Upgrade to S3Proxy 1.5.2
Release notes:

https://github.com/andrewgaul/s3proxy/releases/tag/s3proxy-1.5.2
2017-03-02 10:55:24 -08:00
d40da2c68b Merge pull request #520 from ggtakec/master
Added links for eventual consistency in README.md - #515
2017-01-15 17:38:27 +09:00
7d6312ac78 Added links for eventual consistency in README.md - #515 2017-01-15 08:23:55 +00:00
e26c69a327 Merge pull request #517 from hudsantos/master
Update s3fs.1 - removed duplicated word
2017-01-09 10:48:29 +09:00
ff196e4257 Update s3fs.1 2016-12-16 19:16:17 -02:00
19f0d498aa Merge pull request #513 from ggtakec/master
Added issue and PR templates.
2016-12-04 21:33:55 +09:00
97a806447e Added issue and PR templates. 2016-12-04 12:25:11 +00:00
a00af2385b Merge pull request #512 from ggtakec/master
Changed clock_gettime func to s3fs_clock_gettime for homebrew - #468
2016-12-04 19:42:29 +09:00
6fc972972f Changed clock_gettime func to s3fs_clock_gettime for homebrew - #468 2016-12-04 10:31:41 +00:00
989d403b1f Merge remote-tracking branch 'upstream/master' 2016-12-04 08:40:58 +00:00
7b307601b5 Merge pull request #511 from s3fs-fuse/issue#435
Fixed a bug about uploading NULL to some part of the file contents
2016-12-04 17:20:50 +09:00
d731ab3a8e force java to use openjdk7 2016-12-04 08:14:02 +00:00
174d934d52 Initializing java package by update-alternatives 2016-12-04 08:04:33 +00:00
b428f68acf Patched openjdk java path on travis at 12/4/2016 2016-12-04 07:45:28 +00:00
5350e03147 Test configure to determine the installed openjdk Java version 2016-12-04 07:23:23 +00:00
28c7888a50 Separated the openjdk-7-jdk install step to check why Travis could not install it. 2016-12-04 07:01:28 +00:00
915a1321c7 Use server-provided ETag during complete upload
This avoids calculating the MD5 locally and enables use with object
stores which do not use MD5 as ETag.
2016-11-23 18:48:57 -08:00
8a11d7bc2f Merge pull request #505 from andrewgaul/spelling
Correct typos
2016-11-20 09:10:27 +09:00
7aae4782d9 Merge pull request #504 from andrewgaul/test
Use describe helper function
2016-11-20 09:10:16 +09:00
aba9e29471 Merge pull request #503 from andrewgaul/mtime-test
Add missing call to mtime test
2016-11-20 09:10:08 +09:00
d375bca0d0 Correct typos 2016-11-19 15:57:41 -08:00
cd0c8599cc Use describe helper function 2016-11-19 15:36:02 -08:00
20878a1618 Add missing call to mtime test 2016-11-19 15:10:41 -08:00
edd0a11fb5 Merge pull request #494 from mapreri/typo
Fix typo s/destroied/destroyed/
2016-11-20 05:53:23 +09:00
5e4bafeab7 Merge pull request #498 from andrewgaul/s3proxy
Upgrade to S3Proxy 1.5.1
2016-11-20 05:44:07 +09:00
67a836223a Merge pull request #495 from driskell/fix_sse_copy
Fix invalid V4 signature on multipart copy requests
2016-11-20 05:43:10 +09:00
7e2d6a3eed Merge remote-tracking branch 'upstream/issue#435' 2016-11-19 20:36:02 +00:00
1ee5a468f4 Merge pull request #502 from ggtakec/issue#435
Fixed issue#435 branch codes for remaining bugs(2)
2016-11-20 05:28:10 +09:00
81e209bdd1 Fixed issue#435 codes 2016-11-19 20:09:35 +00:00
90eda81624 Merge remote-tracking branch 'upstream/issue#435' 2016-11-19 19:52:27 +00:00
cafe6015e3 Upgrade to S3Proxy 1.5.1
Enabled previously broken tests and test with default v4 signer.
Release notes:

https://github.com/andrewgaul/s3proxy/releases/tag/s3proxy-1.5.0
https://github.com/andrewgaul/s3proxy/releases/tag/s3proxy-1.5.1
2016-11-15 23:09:48 -08:00
2492dc60ce Fix invalid V4 signature on multipart copy requests 2016-11-15 23:09:23 -08:00
6f688770fd Fix invalid V4 signature on multipart copy requests 2016-11-13 13:22:00 +00:00
8c0b1d9c5b Fix typo s/destroied/destroyed/ 2016-11-11 23:27:17 +00:00
efde0ec9de Merge pull request #489 from ggtakec/master
Changed headers_t map using nocase compare function - #488
2016-10-23 23:23:31 +09:00
632495374b Changed headers_t map using nocase compare function - #488 2016-10-23 14:14:19 +00:00
15b797f3ee Merge pull request #488 from ggtakec/master
Fixed searching for Content-Length to be case-insensitive - #480
2016-10-23 22:42:52 +09:00
a7a64d954a Fixed searching for Content-Length to be case-insensitive - #480 2016-10-23 13:27:01 +00:00
cca217f613 Merge pull request #487 from driskell/debugging
Split header debugging onto multiple lines for easier reading
2016-10-23 21:51:38 +09:00
1a9cf6f66d Fixed searching for Content-Length to be case-insensitive - #480 2016-10-23 12:40:51 +00:00
02d7296210 Split header debugging onto multiple lines for easier reading 2016-10-22 15:11:18 +01:00
a688df813e Fixed a bug when reading a symlink 2016-10-11 13:32:08 +00:00
164424bc89 Merge branch 'master' into issue#435 2016-10-11 12:13:03 +00:00
f38aaa3d0e Merge pull request #483 from ggtakec/master
Trim symbolic link original path in file.
2016-10-11 19:31:34 +09:00
7fabd18b1f Trim symbolic link original path in file. 2016-10-11 10:22:30 +00:00
5db369d67e Trim symbolic link original path in file. 2016-10-11 10:17:46 +00:00
dba32fdf78 Trim symbolic link original path in file. 2016-10-11 10:06:21 +00:00
716baada22 Testing patch codes for issue#435 2016-10-10 12:16:09 +00:00
1a93897e85 Merge pull request #477 from ggtakec/master
OS-specific handling of the extended attribute header
2016-10-02 16:34:55 +09:00
9fd1368611 OS-specific handling of the extended attribute header 2016-10-02 07:23:19 +00:00
9f174d7614 Merge pull request #471 from ggtakec/master
Added use_xattr option for #467 and #460
2016-09-19 14:00:20 +09:00
65d52506c4 Added use_xattr option for #467 and #460 2016-09-19 04:28:01 +00:00
a56fe0ea28 Merge pull request #466 from ggtakec/master
Fixed a bug where the file mode could not be copied from the original file
2016-09-11 23:11:22 +09:00
ec110bb0f3 Added small logic in test script for test_chown 2016-09-11 13:41:50 +00:00
232befb52a Added small logic in test script for test_chown 2016-09-11 13:37:53 +00:00
f363c21ff5 Added comments in failure message for test_chown 2016-09-11 13:28:40 +00:00
1a96f40a10 Fixed a bug where the file mode could not be copied from the original file 2016-09-11 13:09:23 +00:00
6be3236b28 Merge pull request #451 from kasbah/patch-1
Correct path in README
2016-07-24 17:32:35 +09:00
ccefd835d0 Merge pull request #454 from ggtakec/master
Changed to accept mount options compatible with mtab - #449
2016-07-24 17:31:45 +09:00
1ddc14d59d Changed to accept mount options compatible with mtab - #449 2016-07-24 08:17:58 +00:00
87f617374a Merge pull request #449 from treestem/mountopt
Accept mount options compatible with mtab
2016-07-24 16:48:33 +09:00
b76fc350b0 Merge pull request #447 from dsulli99/master
added fuse package for mounting via /etc/fstab, fixes #417
2016-07-18 22:25:30 +09:00
4deb6fdd84 added fuse package for mounting via /etc/fstab, fixes #417 2016-07-15 06:12:47 -05:00
2d5be2157a Correct path in README 2016-07-14 16:55:03 +01:00
a19206cf0f Accept mount arguments compatible with mtab
Using "mount -a" fails for already-mounted s3fs directories,
because s3fs mount arguments don't match the form in /etc/mtab.
Calling "mount -a" should quietly succeed when a directory is
already mounted.

To fix this, accept mount commands of the form:

s3fs s3fs /srv/object-store -o bucket=mybucket

or in /etc/fstab form:

s3fs  /srv/object-store  fuse.s3fs  bucket=mybucket 0 0

This matches the form in /etc/mtab and allows "mount -a" to
work properly.
2016-07-13 17:23:33 -04:00
0f9428ad5a Add mirror file logic for removing cache file 2016-07-13 17:23:32 -04:00
d748b333ee Merge pull request #444 from ggtakec/master
Add mirror file logic for removing cache file
2016-07-03 13:07:59 +09:00
e8a8019a71 Add mirror file logic for removing cache file 2016-07-03 03:37:08 +00:00
e8680b485d Merge pull request #443 from tlevi/master
Fix for leaks during stat cache entry expiry / truncation (#340)
2016-07-03 12:24:41 +09:00
ab4b92074c Fix cppcheck complaint 2016-06-30 11:51:25 +09:30
d57c12d3c3 Possible fix for leaks during entry expiry / truncation (#340) 2016-06-28 15:01:14 +09:30
676b2090fb Merge pull request #440 from ggtakec/master
Handled all curl errors without exiting the process - #437
2016-06-27 20:04:29 +09:00
6005929a96 Handled all curl errors without exiting the process - #437 2016-06-27 10:38:49 +00:00
49ffaa1d94 Merge pull request #432 from andrewgaul/create-bucket-location
Include location constraint when creating bucket
2016-06-26 09:39:47 +09:00
9fb3fd1a4d Merge pull request #433 from andrewgaul/typo
Correct search and replace typo
2016-06-14 21:45:01 +09:00
28b2b5cac3 Merge pull request #431 from mapreri/typo
fix typo s/controll/control/
2016-06-14 21:43:37 +09:00
320b8e1171 Include location constraint when creating bucket
This allows creating buckets in non-default regions.  Also improve
do_create_bucket error handling.
2016-06-13 10:35:37 -07:00
95cb5d201f Correct search and replace typo 2016-06-13 10:25:33 -07:00
880708ab5f fix typo s/controll/control/ 2016-06-12 15:02:40 +00:00
36917f7780 Merge pull request #426 from s3fs-fuse/ggtakec-patch-directly
Updated to correct ChangeLog
2016-05-29 12:40:28 +09:00
fe44f81ef2 Updated to correct ChangeLog
Because there was an extra line break
2016-05-29 12:37:16 +09:00
a81a2091c3 Merge pull request #425 from ggtakec/master
Updated ChangeLog and configure.ac for release 1.80
2016-05-29 12:29:06 +09:00
88d6c20cde Updated ChangeLog and configure.ac for release 1.80 2016-05-29 03:19:02 +00:00
4ff41f2ebf Merge pull request #424 from ggtakec/master
Added travis CI badge in README.md
2016-05-29 12:02:19 +09:00
a7d2148c60 Added travis CI badge in README.md
2016-05-29 11:40:58 +09:00
980c0f81dd Merge pull request #422 from nturner/fix/iam-role-auto
Fixes for iam_role=auto
2016-05-29 09:22:58 +09:00
775e493b0a Merge pull request #420 from nturner/master
Skip early credential checks when iam_role=auto
2016-05-29 09:11:24 +09:00
584ea488bf Use role name instead of profile name when iam_role=auto
When using an instance with an IAM Role, transient credentials can be
found in http://169.254.169.254/latest/meta-data/ at
iam/security-credentials/role-name and s3fs tries to do this. However,
it is using the profile-name where role-name is needed. In many cases
the role and profile name are the same, but they are not always.

The simplest way to find the role name appears to be to GET
http://169.254.169.254/latest/meta-data/iam/security-credentials/
itself, which returns a listing of the role names for which temporary
credentials exist. (I think there will probably only be one, but we
probably want to split on newlines and take the first one here in case
that assumption is not valid). This is the approach the AWS SDK appears
to use (based on WireShark analysis).

Bug: https://github.com/s3fs-fuse/s3fs-fuse/issues/421
Signed-off-by: Nathaniel W. Turner <nate@houseofnate.net>
2016-05-24 13:34:19 -04:00
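The lookup described above can be sketched with curl (illustrative only; it follows the commit's description of the metadata service):

# List role names; take the first line in case several exist.
role=$(curl -s http://169.254.169.254/latest/meta-data/iam/security-credentials/ | head -n 1)
# Fetch the transient credentials for that role.
curl -s "http://169.254.169.254/latest/meta-data/iam/security-credentials/${role}"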
594c9ca7d2 Skip early credential checks when iam_role=auto
If user specifies iam_role=auto (or just iam_role), credentials will not
be loaded during early phase, so skip credential checks there.

Signed-off-by: Nathaniel W. Turner <nate@houseofnate.net>
2016-05-20 12:49:02 -04:00
c2b7a7e453 Merge pull request #415 from ggtakec/master
Fixed a bug about stat_cache_expire - #382
2016-05-14 18:14:56 +09:00
34b604cdfe Fixed a bug about stat_cache_expire - #382 2016-05-14 09:03:52 +00:00
d16d616f34 Merge pull request #411 from ggtakec/master
Loading IAM role name automatically (iam_role option) - #387
2016-05-06 13:57:32 +09:00
50f1ad51c8 Loading IAM role name automatically (iam_role option) - #387 2016-05-06 04:37:32 +00:00
fe253c3d22 Merge pull request #410 from ggtakec/master
Allow duplicate key in ahbe_conf - #386
2016-05-06 10:21:50 +09:00
6cc30eea44 Allow duplicate key in ahbe_conf - #386 2016-05-06 01:08:39 +00:00
6be264a17f Merge pull request #409 from ggtakec/master
Fixed 'load_sse_c' option not working - #388
2016-05-06 09:47:39 +09:00
1ddbd4d6bb Fixed 'load_sse_c' option not working - #388 2016-05-06 00:36:54 +00:00
845fdb43f2 Merge pull request #404 from rockuw/keep-alive
Add curl handler pool to reuse connections
2016-04-26 23:40:45 +09:00
72f6c4d2dc Merge pull request #403 from rockuw/master
Fix a bug of truncating empty file
2016-04-26 23:35:22 +09:00
cf23dc78ab Use 'return' instead of 'exit' in test 2016-04-22 16:24:26 +08:00
b78adb4bb0 Add curl handler pool to reuse connections 2016-04-22 14:57:31 +08:00
115bd51f3f Fix a bug of truncating empty file 2016-04-22 14:49:37 +08:00
b979d40778 Merge pull request #397 from ggtakec/master
Supported User-Agent header - #383
2016-04-17 17:01:59 +09:00
10589a9497 Supported User-Agent header - #383 2016-04-17 07:44:03 +00:00
2f5973c02b Merge pull request #395 from ggtakec/master
Fixed writing sparse files - #375,#379,#394
2016-04-13 03:34:44 +09:00
090c37a1c1 Fixed writing sparse files - #375,#379,#394 2016-04-12 18:24:36 +00:00
d048f380c1 Merge pull request #394 from s3fs-fuse/revert-379-master
Revert "Fixed a bug about writing sparsed file - #375"
2016-04-13 01:30:34 +09:00
fff40bbff3 Revert "Fixed a bug about writing sparse files - #375" 2016-04-13 01:24:24 +09:00
daef00e38b Merge pull request #391 from dreh23/patch-1
Update s3fs.1
2016-04-10 15:39:21 +09:00
4ca1b90d00 Merge pull request #385 from mapreri/typo
fix typo in curl.cpp: s/returing/returning/

@mapreri thanks!
2016-04-10 14:02:57 +09:00
c5691b6c7c Merge pull request #376 from RobbKistler/seek-test
Test for writing after an lseek past end of file
2016-04-10 13:00:10 +09:00
fb2ee7cc02 Update s3fs.1 2016-04-09 00:31:01 +02:00
136ec654c2 fix typo in curl.cpp: s/returing/returning/ 2016-04-02 15:19:06 +00:00
4e583583cd Test for writing after an lseek past end of file
This is a test to demonstrate Issue #375
2016-03-23 16:03:38 -07:00
91861e7fcd Merge pull request #379 from ggtakec/master
Fixed a bug about writing sparse files - #375
2016-03-22 15:06:31 +09:00
ded4faf2e4 Fixed a bug about writing sparse files - #375 2016-03-22 05:44:14 +00:00
cf56b35766 Merge pull request #372 from ggtakec/master
Fixed a bug about etag comparison in stats cache, etc.
2016-03-13 18:59:38 +09:00
98d55582eb Changed constructor/destructor in cache.h 2016-03-13 09:47:37 +00:00
84bdd51021 Fixed a bug about etag comparison in stats cache. 2016-03-13 09:29:06 +00:00
fbd8959d69 Merge pull request #371 from ggtakec/master
Always set stats cache for opened file
2016-03-13 15:15:30 +09:00
67efc11d94 Always set stats cache for opened file 2016-03-13 05:43:28 +00:00
d6e6eebb95 Merge pull request #364 from ggtakec/master
Checked content-type case-insensitively - #363
2016-02-13 15:08:23 +09:00
4c65c09f4d Checked content-type case-insensitively - #363 2016-02-13 05:58:59 +00:00
b281328ff4 Merge pull request #359 from yurykats/issue358
Issue 358: Remove optional parameter from Content-Type header
2016-02-11 12:19:25 +09:00
e9d2b38726 Merge pull request #360 from RobbKistler/configure
Fix clock_gettime autotools detection on Linux
2016-02-10 00:41:14 +09:00
f4aac111a4 Fix clock_gettime autotools detection on Linux 2016-02-08 13:45:34 -08:00
230991782b Update s3fs_util.cpp 2016-02-08 16:39:56 -05:00
ac99df5c09 Merge pull request #357 from ggtakec/master
Fixed codes about clock_gettime for osx(3)
2016-02-07 17:30:23 +09:00
f81e6103cb Fixed codes about clock_gettime for osx(3) 2016-02-07 08:27:02 +00:00
cd04cb0875 Merge pull request #356 from ggtakec/master
Fixed codes about clock_gettime for osx(2)
2016-02-07 17:14:32 +09:00
0755c6f60c Fixed codes about clock_gettime for osx(2) 2016-02-07 08:10:23 +00:00
1c9d7a9ea9 Merge pull request #355 from ggtakec/master
Fixed codes about clock_gettime for osx
2016-02-07 16:44:20 +09:00
e01ded9e27 Fixed codes about clock_gettime for osx 2016-02-07 07:40:55 +00:00
bf056b213a Merge pull request #354 from ggtakec/master
Supported regex type for additional header format - #343
2016-02-07 15:16:51 +09:00
1af7aaeccb Fixed addhead.cpp for cppcheck 2016-02-07 05:53:56 +00:00
c7cf86c2ef Separated AdditionalHeader class from curl.* 2016-02-07 05:41:56 +00:00
6472eedddc Supported regex type for additional header format. 2016-02-07 05:08:52 +00:00
938554e569 Merge pull request #352 from ggtakec/master
Remove the stat cache file dir if del_cache is specified - #337
2016-02-07 04:05:45 +09:00
150b83f61e Remove the stat cache file dir if del_cache is specified - #337 2016-02-06 18:59:13 +00:00
87faed0d04 Merge pull request #351 from ggtakec/master
Check cache directory path and attributes - #347
2016-02-06 22:46:16 +09:00
c5a94cfc0c Check cache directory path and attributes - #347 2016-02-06 13:38:48 +00:00
f548e8ad5e Merge pull request #348 from RobbKistler/pushtests
Integration test summary, continue on error
2016-02-06 19:06:05 +09:00
203df6b58a Merge pull request #346 from RobbKistler/empty-dir
Fix empty directory check against AWS S3
2016-02-06 18:59:45 +09:00
0ac2f7cded Merge pull request #350 from ggtakec/master
Changed cache out logic for stat - #340
2016-02-06 18:37:58 +09:00
b90b51f2c5 Changed cache out logic for stat - #340 2016-02-06 09:09:17 +00:00
8b457133da Merge pull request #341 from hryang/master
Fix the memory leak issue in fdcache.
2016-02-06 14:41:28 +09:00
7bfaa24d25 Integration test summary, continue on error
Details in README.md and s3fs-integration-test-common.sh

Factor out s3fs-fuse and s3proxy start/stop.  The plan is to make it easier to
add test suites besides small-integration-test.sh that can test with various
s3fs options.

Each test run starts in a uniquely named directory at the top of the bucket.  This allows
multiple runs against persistent storage without worrying about cleaning
up in error conditions that leave artifacts behind.

Tests continue if a test case fails.

Results are summarized at the end of the test run

Environment variable to control debug level of s3fs-fuse

Environment variable to enable public bucket (makes it easier to poke
around with tools like curl)

Environment variable to start s3fs-fuse under valgrind

Environment variable that causes the script to set up s3fs-fuse and then
wait indefinitely, making it easy to experiment manually with the mount
point.

Additional test case
2016-02-05 05:40:28 -08:00
4eff6b4dd1 Fix empty directory check against AWS S3
For ListBucketResult on an empty directory, AWS S3 and S3Proxy 1.4
differ.  AWS will match the directory name, S3Proxy does not.

Changing max-keys=1 to max-keys=2 works for both implementations.
append_objects_from_xml() will swallow the directory key.  The log
level of this message is changed from ERROR to DBG.

Fixes #345
2016-02-04 23:13:00 -08:00
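A schematic of the adjusted listing request (bucket and prefix are placeholders; a real request would also need authentication):

curl "https://s3.amazonaws.com/mybucket/?prefix=dir/&max-keys=2"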
e3765ad497 Tune the code indent. 2016-01-28 11:16:06 +08:00
dd9f3aed36 Fix the memory leak issue in fdcache. See issue #340 2016-01-28 11:11:53 +08:00
ccfa13f295 Merge pull request #339 from ggtakec/master
Updated README.md for fstab example.
2016-01-24 14:38:48 +09:00
540c04e6cc Updated README.md for fstab example. 2016-01-24 05:34:28 +00:00
4b40727644 Merge pull request #338 from ggtakec/master
Fixed a bug where the IAMCRED type could not be retried.
2016-01-24 14:10:39 +09:00
83937700dd Fixed a bug where the IAMCRED type could not be retried. 2016-01-24 05:01:50 +00:00
2c156ceea2 Merge pull request #336 from Jirapong/master
update README.md for fstab
2016-01-22 00:10:02 +09:00
0615338592 update about netfs on boot 2016-01-19 12:06:10 +07:00
b847872622 update README.md for fstab 2016-01-19 11:22:55 +07:00
e932583309 Merge pull request #334 from andrewgaul/bucket-host
Bucket host should include port and not path
2016-01-17 14:46:40 +09:00
7410b7525f Merge pull request #329 from andrewgaul/v4-signature-get
Correct multiple issues with GET and v4 signing
2016-01-17 14:46:19 +09:00
88a4f04217 Bucket host should include port and not path
This resolves issues when using v4 signing with path-style requests.
2016-01-16 15:58:54 -08:00
ff607e1a2d Correct multiple issues with ListBucketRequest
* provide correct path
* sign query string
* URL encode query string
2016-01-16 10:17:20 -08:00
4bfbfa3621 Merge pull request #331 from andrewgaul/clang
Address various clang warnings
2016-01-16 16:14:50 +09:00
43b91d3235 Merge pull request #330 from andrewgaul/pass-by-reference
Pass by const reference where possible
2016-01-16 16:14:31 +09:00
9fa205f1c3 Merge pull request #328 from andrewgaul/v4-signature-path-request-style
Fix v4 signature with use_path_request_style
2016-01-16 16:14:00 +09:00
e003732f18 Address various clang warnings
Found with:

-Wc++11-extensions
-Wc++11-extra-semi
-Wmissing-variable-declarations
-Wundef
2016-01-11 00:52:24 -08:00
b946b59522 Pass by const reference where possible 2016-01-10 16:58:24 -08:00
ea6b287d1a Fix v4 signature with use_path_request_style
Previously s3fs omitted the bucket name when using path request style
causing SignatureDoesNotMatch with v4 signatures.
2016-01-10 13:41:56 -08:00
a6455ef1bc Merge pull request #323 from andrewgaul/readme
Add goofys to references
2016-01-10 04:26:52 +09:00
8e5e44bfce Add goofys to references 2016-01-07 16:14:11 -08:00
ea151a70c4 Merge pull request #321 from mcellis33/320
320: delete stat cache entry in s3fs_fsync so st_size is refreshed
2015-12-20 15:05:28 +09:00
1e1f2a66de Merge pull request #319 from RobbKistler/clean-exit
Clean up mount point on errors in s3fs_init()
2015-12-20 15:04:54 +09:00
163daa5de1 320: delete stat cache entry in s3fs_fsync so st_size is refreshed 2015-12-18 15:39:25 -08:00
b581290c30 Cleanly exit fuse loop on error in s3fs_init
This allows FUSE to clean the mount point up, preventing
"Transport endpoint not connected" errors on subsequent
access to the mount.
2015-12-15 15:25:56 -08:00
1927ccfe0a Don't loop on fusermount if mountpoint is gone 2015-12-15 15:07:00 -08:00
8162d4925d Merge pull request #313 from mcellis33/gitignore
fix gitignore
2015-12-08 00:04:52 +09:00
2b3ece467b Merge pull request #311 from RobbKistler/dbg-message
Change error log to debug log in s3fs_read()
2015-12-08 00:03:48 +09:00
c2f9b38a95 fix gitignore 2015-12-04 15:21:32 -08:00
8e688816d4 Change error log to debug log in s3fs_read() 2015-12-03 21:25:27 -08:00
8dbd5a3f65 Merge pull request #310 from ggtakec/master
Update integration-test-main.sh as additional change for #300
2015-12-03 22:49:55 +09:00
4bd5ffb0fa Update integration-test-main.sh as additional change for #300 2015-12-03 13:44:43 +00:00
7b2e963636 Merge pull request #300 from bazeli/patch-1
Update integration-test-main.sh
(but it does not work now; I will fix it as soon as possible.)
2015-12-03 22:36:45 +09:00
87d04acb2f Merge pull request #309 from ggtakec/master
Check pthread portability in configure as additional change for #307
2015-12-03 16:58:23 +09:00
759b44135a Check pthread portability in configure as additional change for #307 2015-12-03 07:47:17 +00:00
8b53e0d931 Merge pull request #307 from rockuw/master
Fix pthread portability problem
2015-12-03 16:35:30 +09:00
7db23f9d03 Merge pull request #308 from ggtakec/master
Changed the ensure-free-disk-space logic as an additional change for #306
2015-12-03 14:49:40 +09:00
3e655bad3b PTHREAD_MUTEX_RECURSIVE_NP is an enum, not a macro 2015-12-03 13:44:11 +08:00
5e97cb0f48 Changed the ensure-free-disk-space logic as an additional change for #306 2015-12-03 05:40:26 +00:00
ef90e0deed Merge pull request #306 from guymguym/patch-1
Fix read concurrency to work in parallel count
2015-12-03 14:26:40 +09:00
f44b61c403 Fix pthread portability problem 2015-12-03 10:44:38 +08:00
6067af6ef1 (Guy) Fix read concurrency to work in parallel count
When the prefetch size is limited to the multipart size, the entire parallel logic of the read flow does not have an opportunity to use parallel get.
This fix increases the read performance significantly over our own s3 on-premise solution.
2015-11-30 18:38:15 +02:00
d7a4fc2927 Merge pull request #304 from ggtakec/master
Fixed a bug about mtime - #299
2015-11-30 01:53:13 +09:00
7b62de80f6 Fixed a bug about mtime - #299 2015-11-29 15:53:53 +00:00
8ffff5ba96 Merge remote-tracking branch 'upstream/macosx' 2015-11-29 15:47:47 +00:00
e804441234 Added FAQ wiki page link in README.md 2015-11-26 21:31:03 +09:00
9cc0fd2240 Merge pull request #302 from RobbKistler/syslog-level
Fix syslog level used by S3FS_PRN_EXIT()
2015-11-26 20:35:32 +09:00
fff2952d5f Fix syslog level used by S3FS_PRN_EXIT()
Without converting from s3fs log levels to syslog levels, the syslog
ends up being LOG_EMERG which can cause a broadcast message to all
users.
2015-11-25 13:53:08 -08:00
b85bd53336 Update integration-test-main.sh
New test for mtime preservation when copying a file with `cp -p`
2015-11-24 17:29:54 +09:00
e1de134d94 Merge branch 'master' into macosx 2015-11-08 06:06:05 +00:00
5af6d4bd82 Merge pull request #295 from ggtakec/fixissue
File opened with O_TRUNC is not flushed - changed #291
2015-11-08 14:14:42 +09:00
c673d9d935 File opened with O_TRUNC is not flushed - changed #291 2015-11-08 04:55:17 +00:00
0fdda61fb5 Merge pull request #293 from SnakeHunt2012/master
Fix a small spelling issue.
2015-11-08 13:26:01 +09:00
331b8456a0 Merge pull request #291 from RobbKistler/truncate
Issue #290: File opened with O_TRUNC is not flushed
2015-11-08 13:23:06 +09:00
63b6f3635b Merge pull request #289 from RobbKistler/log-source-file
Print source file in log messages
2015-11-08 13:14:33 +09:00
c04bcce206 Fix a small spelling issue. 2015-11-06 16:49:37 +08:00
dd7d9268f2 Force flush in s3fs_open() if file is truncated. 2015-11-03 22:06:25 -08:00
a3ef5c820d Add file truncate test
This test creates a file with contents, truncates it to
zero and verifies that it is zero length.
2015-11-03 21:47:15 -08:00
e4da5c59b6 Print source file in log messages 2015-11-03 08:34:02 -08:00
ad2a406205 Merge pull request #288 from ggtakec/master
Fixed a bug about head request(copy) for SSE - issue#286
2015-11-01 23:10:04 +09:00
001206f7c1 Fixed a bug about head request(copy) for SSE - issue#286 2015-11-01 14:05:47 +00:00
2ef7f497f6 Fixed a bug about head request(copy) for SSE - issue#286 2015-11-01 13:54:47 +00:00
497b108109 Merge pull request #285 from andrewgaul/symlink-test
Add test for symlink
2015-11-01 18:46:06 +09:00
86f95b05bf Add test for symlink 2015-10-24 14:20:26 -07:00
70db77af38 Merge pull request #280 from ggtakec/master
Supported an object which is larger than free disk space
2015-10-21 00:53:07 +09:00
8dd234dd8f Fixed bugs about cppcheck error 2015-10-20 15:47:07 +00:00
83d46ef8c6 Fixed bugs about an object larger than free disk space 2015-10-20 15:19:04 +00:00
1b323a6252 Changed debug option to dbglevel in test script. 2015-10-18 17:31:31 +00:00
d102eb752d Supported an object which is larger than free disk space 2015-10-18 17:03:41 +00:00
4252fab685 Merge pull request #248 from andrewgaul/travis-docker
Enable integration tests for Travis
2015-10-19 00:40:28 +09:00
94e3dbb2dc Enable integration tests for Travis
http://blog.travis-ci.com/2015-10-14-opening-up-ubuntu-trusty-beta/
2015-10-14 15:57:15 -07:00
8f115078cd Merge pull request #278 from ggtakec/master
Supported SSE KMS (#270)
2015-10-07 00:01:38 +09:00
f51ad1f33e Supported SSE KMS 2015-10-06 14:46:14 +00:00
e29069b8dc Merge pull request #275 from ggtakec/master
Changed and cleaned up the logic for debug messages.
2015-10-01 05:01:39 +09:00
92e52dadd4 Changed and cleaned up the logic for debug messages. 2015-09-30 19:41:27 +00:00
a4b00897c1 Merge pull request #274 from ggtakec/master
Modified man page for storage_class option(#271)
2015-09-28 22:52:59 +09:00
f1b7f5ea95 Modified man page for storage_class option(#271) 2015-09-28 13:47:39 +00:00
6a9082f126 Merge pull request #271 from andrewgaul/storage-class
Add support for standard_ia storage class
2015-09-28 22:18:32 +09:00
48f0a6f811 Merge pull request #268 from RobbKistler/loopback
Use 127.0.0.1 not localhost in s3proxy wait loop
2015-09-28 22:12:29 +09:00
1b39b2d450 Merge pull request #267 from nickstinger/master
Added the _netdev option to the fstab example.
2015-09-28 22:11:00 +09:00
785ed642ba Add support for standard_ia storage class
This enables storage with lower at-rest prices, higher request prices,
and lower availability.  Also rework existing reduced redundancy
parsing into a more generic storage class.  More background on
standard_ia:

https://aws.amazon.com/blogs/aws/aws-storage-update-new-lower-cost-s3-storage-option-glacier-price-reduction/
2015-09-17 13:35:25 -07:00
3d5b8a7672 Use 127.0.0.1 not localhost in s3proxy wait loop
localhost doesn't always resolve to 127.0.0.1
2015-09-16 00:06:41 -07:00
0aef0cf765 Added the _netdev option to the fstab example.
Although the network device option (_netdev) may not work everywhere, it likely does no harm on systems where it's not supported. Adding the option to the example will inform users of the need for post-network activation and how that might be accomplished.
2015-09-15 10:34:15 +09:00
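An example line with the option, following the fstab form shown elsewhere in this log (bucket and mount point are placeholders):

s3fs  /mnt/s3  fuse.s3fs  bucket=mybucket,_netdev  0 0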
489f9edec7 Merge pull request #266 from RobbKistler/fix-integration-test
Cleanup from PR #265
2015-09-13 16:44:41 +09:00
718db57ade Code review changes
Missed some cleanup from the code review
2015-09-13 00:31:56 -07:00
639dcf19b0 Merge pull request #265 from RobbKistler/fix-integration-test
Fix integration tests
2015-09-13 15:59:36 +09:00
53bc960224 Merge pull request #263 from RobbKistler/aws
Allow integration testing against Amazon S3
2015-09-13 15:55:28 +09:00
ead346c6d3 Merge pull request #261 from andrewgaul/help-timeouts
Correct help timeouts
2015-09-13 15:49:15 +09:00
375059d9f8 Merge pull request #260 from andrewgaul/help-wrap
Wrap help text at 80 characters
2015-09-13 15:48:28 +09:00
6b21d9d424 Code review changes 2015-09-11 16:09:00 -07:00
dac9844765 Fix remove_nonempty_directory test bug
Wrap the attempt to rmdir in an if statement, otherwise the entire
test process exits (errexit is set). This test expects the rmdir
to fail.
2015-09-11 15:24:17 -07:00
849e66f6a1 Change test_append_file to avoid object read-after-overwrite
Open the test file once outside of the tests for loop.  This helps avoid
object consistency problems when running against S3 providers without
strong consistency (like Amazon).  See Issue #263.
2015-09-11 15:24:17 -07:00
6a8a2e4800 Allow integration testing against Amazon S3
Example command line:
S3FS_CREDENTIALS_FILE=keyfile \
TEST_BUCKET_1=somebucket \
S3PROXY_BINARY="" \
URL="http://s3.amazonaws.com" ./small-integration-test.sh
2015-09-11 14:35:12 -07:00
0358908910 Correct help timeouts
Follow-on to #167.
2015-09-10 11:45:05 -07:00
32ce1a7267 Wrap help text at 80 characters 2015-09-10 11:43:09 -07:00
9ea8da839c Merge pull request #258 from juandiegogonzales/patch-1
Update README.md to better explain mount upon boot
2015-09-09 00:15:55 +09:00
39cec488d2 Merge pull request #257 from jesselsteele/patch-1
Update README.md: Bugfix password file permissions errors
2015-09-09 00:15:21 +09:00
96436df18d Merge pull request #256 from andrewgaul/readme
Add no atomic rename to limitations
2015-09-09 00:14:47 +09:00
3aabb5616c Update README.md to better explain mount upon boot
As a novice Linux user, I didn't know I had to add a line to /etc/fstab for automatic mount upon boot. It took me some minutes of research to find the right process (at first, I even entered s3fs#mybucket into the command line).

This change will (hopefully) save time to unseasoned users.
2015-09-07 10:05:02 -05:00
8e55f45818 Update README.md
received "should not have others permissions" when mounting
did chmod 640 and received "should not have group permissions"
used chmod 600 after creating the password file and had no problems
2015-09-05 15:08:07 +08:00
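A minimal sketch of creating the password file with permissions that avoid both errors (key values are placeholders):

echo "ACCESS_KEY_ID:SECRET_ACCESS_KEY" > ~/.passwd-s3fs
chmod 600 ~/.passwd-s3fs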
ec4135c9ed Add no atomic rename to limitations 2015-09-01 13:10:21 -07:00
cfdfecb4d1 Merge pull request #253 from s3fs-fuse/fixmkdirp
Added checking of cache dir perms at startup.
2015-08-23 13:18:50 +09:00
97b8b34aab Added checking of cache dir perms at startup (2). 2015-08-23 04:14:57 +00:00
ce66430fac Added checking of cache dir perms at startup. 2015-08-23 03:57:34 +00:00
1fc56e6665 Merge pull request #252 from Ziggeo/fix-create-cache-directories
This fixes an issue with caching when the creation of a subdirectory …
2015-08-23 10:48:08 +09:00
d7d96907cf This fixes an issue with caching when the creation of a subdirectory within the cache is aborted because a common cached parent directory already exists. 2015-08-21 19:30:04 -04:00
eb97054f49 Merge pull request #251 from flandr/skip-xattr-tests
Skip xattr tests if utilities are missing
2015-08-22 02:11:02 +09:00
7280ca6a69 Skip xattr tests if utilities are missing 2015-08-21 10:05:14 -07:00
30b2a833a8 Merge pull request #250 from s3fs-fuse/issue#228
s3fs can print version with short commit hash - #228
2015-08-22 01:35:07 +09:00
8f8e52b91a s3fs can print version with short commit hash(2) - #228 2015-08-21 16:30:24 +00:00
751c868769 s3fs can print version with short commit hash - #228 2015-08-21 16:19:31 +00:00
c3a47c26ec Merge pull request #249 from andrewgaul/wget-quiet
Silence wget
2015-08-22 00:51:30 +09:00
632578f328 Merge pull request #247 from andrewgaul/base64
Base64 cleanup
2015-08-22 00:45:26 +09:00
5a4240b18d Merge pull request #246 from andrewgaul/coverity
Unlock during early return in TruncateCache
2015-08-22 00:43:34 +09:00
236aeb9dfd Silence wget 2015-08-20 11:38:27 -07:00
bcfadbe1a8 Unlock during early return in TruncateCache
Found via Coverity.  Regression from
dfa63345ed.
2015-08-19 13:54:14 -07:00
b5c027f15d Add unit tests for base64 encoding and decoding 2015-08-19 13:49:10 -07:00
15db80b459 NUL terminate decoded base64 string
For consistency with encoded strings.
2015-08-19 13:48:07 -07:00
76c0ef86e4 Move base64 and hex functions to string_util 2015-08-19 13:47:26 -07:00
a3e820e733 Merge pull request #245 from andrewgaul/map-duplicate-lookups
Elide duplicate lookups of std::map via iterators
2015-08-20 01:22:06 +09:00
a3568a1419 Merge pull request #243 from andrewgaul/cppcheck-travis
Run cppcheck during Travis builds
2015-08-20 01:20:15 +09:00
4ad57bdea5 Merge pull request #240 from andrewgaul/md5
Enable Content-MD5 during multipart upload part
2015-08-20 01:19:01 +09:00
085733d7c9 Merge pull request #239 from andrewgaul/google-code
Update stale Google Code reference in --help
2015-08-20 01:08:00 +09:00
fcb58aec3c Merge pull request #238 from andrewgaul/cppcheck
Enable all cppcheck rules
2015-08-20 01:06:50 +09:00
402c609316 Merge pull request #237 from andrewgaul/test-refactor
Refactor tests into individual functions
2015-08-20 01:03:03 +09:00
026a9f2bdc Merge pull request #235 from andrewgaul/complete-mpu-leak
Plug leak during complete multipart upload
2015-08-20 00:40:00 +09:00
1918d6fa2d Merge pull request #234 from andrewgaul/readme
Update README
2015-08-20 00:37:49 +09:00
fd04b9a437 Merge pull request #233 from andrewgaul/remove-inttostr
Remove IntToStr
2015-08-20 00:34:21 +09:00
ea99603b58 Merge pull request #232 from andrewgaul/stat-cache-locking
Always hold stat_cache_lock when using stat_cache
2015-08-20 00:28:57 +09:00
036612dbb0 Merge pull request #231 from andrewgaul/autolock
Rewrite AutoLock
2015-08-20 00:24:05 +09:00
67d1576dfb Elide duplicate lookups of std::map via iterators
Also remove use of C++11 std::map::at.
2015-08-18 14:00:42 -07:00
2850fe731b Run cppcheck during Travis builds 2015-08-18 03:01:14 -07:00
a157ac59ca Enable Content-MD5 during multipart upload part
This allows retries of multi-part uploads instead of discovering a
fatal error during complete multipart upload.  Also enable Content-MD5
for integration tests and refactor hexadecimal code.
2015-08-18 02:54:00 -07:00
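For reference, a part's Content-MD5 value is the base64-encoded binary MD5 digest, which can be sketched as (illustrative file name):

openssl md5 -binary part.bin | base64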
20f425fe15 Update README
This better explains many of the features and limitations and removes
stale information.
2015-08-17 07:48:11 -07:00
32520fd1fb Update stale Google Code reference in --help 2015-08-16 23:30:41 -07:00
c0b21d8808 Enable all cppcheck rules 2015-08-16 17:13:24 -07:00
17d223b542 Refactor tests into individual functions 2015-08-16 15:50:17 -07:00
9c5bf0bb66 Plug leak during complete multipart upload 2015-08-15 22:38:24 -07:00
dfa63345ed Always hold stat_cache_lock when using stat_cache
We could further improve this code by holding stat_cache_lock before
calls to DelStat instead of unlocking then relocking it.
2015-08-14 20:14:12 -07:00
3f59b8da01 Rewrite AutoLock
Previously AutoLock::Lock allowed subsequent callers to proceed
without the lock.  Further is_locked was not always protected by
auto_mutex.  Finally AutoLock eagerly released auto_mutex when
recursively unlocking.  s3fs does not need recursive locks so we
rewrite and simplify AutoLock.  Partially surfaced by Coverity.
2015-08-14 20:00:56 -07:00
0ea88a73c7 Remove IntToStr
str duplicates this functionality.  Also add unit test.
2015-08-12 08:25:09 -07:00
2e344bb48f Merge pull request #229 from andrewgaul/test-rename-before-close
Convert rename_before_close to a shell script
2015-08-13 00:10:07 +09:00
c91a645782 Convert rename_before_close to a shell script #229 2015-08-12 15:09:34 +00:00
96f63a17c0 Merge pull request #224 from andrewgaul/cppcheck
Configure cppcheck
2015-08-13 00:05:31 +09:00
756d1e5e81 Configure cppcheck #224 2015-08-12 15:04:16 +00:00
2482aada43 Merge pull request #222 from andrewgaul/explicit
Annotate constructors as explicit
2015-08-12 23:42:03 +09:00
64146f69a4 Merge pull request #221 from andrewgaul/compare
Compare idiomatically
2015-08-12 23:41:24 +09:00
edb3c78fe9 Merge pull request #220 from andrewgaul/test-rmdir-nonempty-directory
Test removing a non-empty directory
2015-08-12 23:41:08 +09:00
49e32967ec Merge pull request #219 from andrewgaul/coverity
Address Coverity errors
2015-08-12 23:40:47 +09:00
5655cffd32 Merge pull request #217 from jelly/master
Override install, so that the make install does not install rename_before_close under /test
2015-08-12 23:40:18 +09:00
09dff484e1 Merge pull request #215 from RobbKistler/memleak
Fix mem leak in openssl_auth.cpp:s3fs_sha256hexsum
2015-08-12 23:38:51 +09:00
deb0e9eec3 Merge pull request #213 from andrewgaul/rename-large-files
Parse ETag from copy multipart correctly
2015-08-12 23:38:22 +09:00
5d1c8a7eda Convert rename_before_close to a shell script 2015-08-11 20:51:18 -07:00
ff8a0c2eea Parse ETag from copy multipart correctly
Previously s3fs misparsed this, preventing renames of files larger
than 5 GB.  Integration test disabled until S3Proxy 1.5.0 is released.
2015-08-11 14:43:35 -07:00
cbf7777f41 Configure cppcheck 2015-08-08 05:18:51 -07:00
fcb55c2109 Fix mem leaks in openssl_auth.cpp, nss_auth.cpp
Fix memory leaks in openssl_auth.cpp:s3fs_sha256hexsum and
nss_auth.cpp:s3fs_sha256hexsum.  Leaks occur every time a file
is created.
2015-08-06 12:45:40 -07:00
b6fa2deb9f Annotate constructors as explicit
This prevents implicit conversions.
2015-08-05 23:41:53 -07:00
801ca0c2d3 Compare idiomatically 2015-08-05 23:35:08 -07:00
5f792a9a2b Test removing a non-empty directory 2015-08-05 23:31:13 -07:00
8ee71caabb Address Coverity errors
Fixed an uninitialized member, misordered NULL check, resource leak,
and unconsumed return value.
2015-08-05 23:28:06 -07:00
ed70f7763a Override install, so that the make install does not install
rename_before_close under /test
2015-08-01 17:15:00 +02:00
730262f000 Merge pull request #212 from s3fs-fuse/master
update content of the master to macosx branch.
2015-07-20 01:38:42 +09:00
cbc057bca7 Merge pull request #211 from s3fs-fuse/release179
Updated ChangeLog and configure.ac for v1.79
2015-07-20 01:23:35 +09:00
6442642656 Updated ChangeLog and configure.ac for v1.79 2015-07-19 16:14:33 +00:00
07a5a36b6a Merge pull request #207 from jalessio/fix_a_few_spelling_issues
Fixed a few small spelling issues.
2015-07-12 01:02:53 +09:00
912bc58df0 Fixed a few small spelling issues. 2015-07-10 11:50:40 -07:00
13a91a52e8 Merge pull request #204 from andrewgaul/xattr-test
Add integration test for xattr
2015-06-29 00:51:24 +09:00
4190130194 Merge pull request #202 from flandr/osx-xattr
Specialize {set,get}xattr for OS X
2015-06-29 00:29:57 +09:00
d9b124f91e Add integration test for xattr 2015-06-28 04:16:35 -07:00
9b3c87ec97 Specialize {set,get}xattr for OS X
These system calls take an extra 'position' parameter on OS X. A
non-zero position value is only valid for resource forks (the Darwin
VFS layer will reject anything else with EINVAL); this patch simply
adds and ignores the parameter on Apple platforms.

Allows building against OSXFUSE.
2015-06-25 12:56:15 -07:00
8f85e5e543 Merge pull request #200 from s3fs-fuse/fixbug
fixed fallback to sigv2 for bucket create and GCS
2015-06-20 13:45:45 +09:00
966d229787 fixed fallback to sigv2 for bucket create and GCS 2015-06-20 04:34:32 +00:00
4d49ace06b Merge pull request #192 from andrewgaul/special-characters
Simplify URL encoding
2015-06-20 11:47:22 +09:00
ad8c64104e Merge pull request #199 from s3fs-fuse/xattr
Supported extended attributes(retry)
2015-06-20 11:46:47 +09:00
d59eff4288 Merge pull request #198 from andrewgaul/travis
Disable integration tests for Travis
2015-06-20 10:42:23 +09:00
219b155037 Disable integration tests for Travis
The previous KVM infrastructure supported this but their new VMware
infrastructure does not.
2015-06-18 11:28:10 -07:00
fe3abed9f0 Changed code for iterators, etc. 2015-06-13 03:27:07 +00:00
0ecf4aa6b4 Changed code for iterators 2015-06-13 03:08:56 +00:00
477573265a Merge pull request #190 from Rotwang/master
Add a no_check_certificate option.
2015-06-13 11:12:35 +09:00
4e03acf17a Simplify URL encoding
This also encodes asterisk and tilde correctly when listing a file
with a V4 auth endpoint.  Also add tests for special characters
although s3proxy does not yet support V4 auth.
Fixes #188.  Fixes #194.
2015-06-10 13:15:58 -07:00
84fb3d83d8 Fixed xattr for binary value 2015-06-06 16:39:39 +00:00
3522e5eda3 Add no_check_certificate option which allows to ignore issues with self signed certs. 2015-05-20 17:32:36 +02:00
3056644969 Merge pull request #185 from andrewgaul/typos
Correct obvious typos in usage and README
2015-05-06 22:37:22 +09:00
91587ad2c8 Merge pull request #184 from andrewgaul/multipart-size
Add usage information for multipart_size
2015-05-06 22:36:37 +09:00
8a73d9fff0 Correct obvious typos in usage and README 2015-05-04 16:25:05 -07:00
28ee9f27b9 Add usage information for multipart_size
Also improve error message.
2015-05-04 16:21:58 -07:00
7ac58a1c69 Merge pull request #178 from andrewgaul/gitignore
Update .gitignore
2015-04-29 00:56:54 +09:00
3914281f1b Merge pull request #177 from andrewgaul/mailmap
Add .mailmap
2015-04-29 00:56:44 +09:00
3d734ad3e3 Merge pull request #176 from mooredan/master
configure.ac: detect target, if target is darwin (OSX), then
2015-04-29 00:55:35 +09:00
bb4075d7b9 Merge pull request #173 from andrewgaul/travis
Run integration tests via Travis
2015-04-29 00:54:14 +09:00
5b11ac0f4c Moved __APPLE__ #endif to correct position 2015-04-27 12:14:09 -07:00
7bc5f0ca13 Update .gitignore 2015-04-27 11:19:14 -07:00
14ce061215 Add .mailmap
This cleans up git shortlog output.
2015-04-27 11:17:39 -07:00
adb5a35097 configure.ac: detect target, if target is darwin (OSX), then
change the minimum version of fuse required.  Change the
checkers to use a variable for the minimum fuse version
instead of it being hardcoded in four different places.

src/s3fs.cpp: Use __APPLE__ define around fuse code that
is offensive to osxfuse. Not including the code doesn't
seem to matter.
2015-04-25 17:13:20 -07:00
b0a12bcac1 Disable rename_before_close
This test currently fails and interferes with the larger integration
test.  References #145.
2015-04-24 11:28:18 -07:00
39d4715b82 Run integration tests via Travis
Mail from the Travis team:

Thanks for the email. I have set up s3fs-fuse/s3fs-fuse with our alpha
testing stack which may allow you to use FUSE.

To use it, add the following to your .travis.yml:
dist: trusty

Please keep in mind that the service may become unavailable without
notice, and change details. We welcome your feedback as to what works
and what does not with this setup.
2015-04-23 21:25:24 -07:00
aac92bd6c0 Fixed wrong owner checking and return codes 2015-04-21 16:18:05 +00:00
f258a14070 Supported extended attributes, initial commit 2015-04-20 17:24:57 +00:00
3701f1c16b Merge pull request #171 from pabigot/mixedcase
Support buckets with mixed-case names
2015-04-21 02:06:07 +09:00
92fcee824b curl: use pathrequeststyle option when constructing Host endpoint
Buckets with mixed-case names can't be accessed with the virtual-hosted
style API due to DNS limitations.  S3FS has an option for
pathrequeststyle which is used for the URL, but it was not applied when
building the endpoint passed through the Host header.  Fix this, and
relax the validation on bucket names when using this style.

See: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro

Signed-off-by: Peter A. Bigot <pab@pabigot.com>
2015-04-19 08:31:40 -05:00
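A schematic of the two request styles for a mixed-case bucket (URLs are illustrative; the option name follows use_path_request_style as seen elsewhere in this log):

# virtual-hosted style: bucket in the Host header, problematic for mixed case
#   https://My.Bucket.s3.amazonaws.com/key
# path style: bucket in the path, Host stays s3.amazonaws.com
#   https://s3.amazonaws.com/My.Bucket/key
s3fs My.Bucket /mnt/s3 -o use_path_request_style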
00f8e1d0ba Merge pull request #170 from s3fs-fuse/issue/#157
Reviewed and fixed response code printing in curl.cpp - #157
2015-04-18 23:37:28 +09:00
43191eea53 Added apt cache in travis.yml 2015-04-18 13:45:58 +00:00
490ed8f689 Reviewed and fixed response code printing in curl.cpp - #157 2015-04-18 13:32:04 +00:00
30152284cc Merge pull request #168 from kahing/fix-v4-host-endpoint
switch to use region specific endpoints to compute correct v4 signature
2015-04-18 18:02:45 +09:00
70097709b2 switch to use region specific endpoints to compute correct v4 signature
fix #133
2015-04-14 16:25:17 -07:00
07e007052a Merge pull request #167 from s3fs-fuse/timeoutbranch
Increased default connecting/reading/writing timeout value
2015-04-12 11:18:52 +09:00
bd27294ab0 Increased default connecting/reading/writing timeout value 2015-04-12 02:04:13 +00:00
5e5c20757b Merge pull request #165 from kahing/auth_v4_refactor
Auth v4 refactor
2015-04-12 08:13:25 +09:00
6231ae208a Merge pull request #164 from kahing/fix_v4_signing_host
send the correct Host header when using -o url
2015-04-12 08:12:51 +09:00
42a4f5fd95 Merge pull request #159 from andrewgaul/s3proxy-1.4.0
Upgrade to S3Proxy 1.4.0
2015-04-12 08:05:49 +09:00
6e0a302f7d refactor sigv4 to reduce code duplication 2015-04-09 15:11:59 -07:00
98af055d8b send the correct Host header when using -o url
fixes #161
2015-04-09 13:53:50 -07:00
fa5c7ff4df Upgrade to S3Proxy 1.4.0
Release notes:

https://github.com/andrewgaul/s3proxy/releases/tag/s3proxy-1.4.0
2015-03-29 23:59:39 -07:00
d7327df885 Merge pull request #156 from s3fs-fuse/issue/#126
Fixed a bug about ssl session sharing with libcurl older than 7.23.0 - issue#126
2015-03-21 16:19:58 +09:00
0f13c8fe97 Fixed a bug about ssl session sharing with libcurl older than 7.23.0 - issue/#126 2015-03-21 07:04:20 +00:00
44d740080b Merge pull request #155 from s3fs-fuse/bugfix
Fixed a bug: unable to mount bucket subdirectory
2015-03-21 13:39:19 +09:00
2fc3a4e91e Fixed a bug: unable to mount bucket subdirectory 2015-03-21 04:31:59 +00:00
66e0233410 Merge pull request #154 from s3fs-fuse/issue#149
Fixed url-encoding for ampersand etc on sigv4 - Improvement/#149
2015-03-21 11:32:08 +09:00
a04bec85b2 Fixed url-encoding for ampersand etc on sigv4 - Improvement/#149 2015-03-21 02:11:55 +00:00
f861b11a91 Merge pull request #147 from andrewgaul/s3proxy-snapshot
Use S3Proxy 1.4.0-SNAPSHOT
2015-03-11 01:41:58 +09:00
37f9bbd231 Merge pull request #146 from kahing/exit_handler_for_test
add exit handler to cleanup on failures
2015-03-11 01:41:42 +09:00
af004576f1 Merge pull request #150 from s3fs-fuse/fixbug
Fixed a bug not handling fsync - #145
2015-03-11 01:29:17 +09:00
26453c4874 Fixed a bug not handling fsync. 2015-03-10 16:18:03 +00:00
4e18bf0bc2 Use S3Proxy 1.4.0-SNAPSHOT 2015-03-09 18:05:14 -07:00
7c298e94f5 add exit handler to cleanup on failures
and other changes that make debugging easier
2015-03-09 15:56:38 -07:00
761d2399f2 Merge pull request #144 from andrewgaul/travis
Add Travis configuration
2015-03-10 01:37:50 +09:00
1210cf8c6c Add Travis configuration 2015-03-09 03:57:39 -07:00
524e005b5c Merge pull request #143 from s3fs-fuse/issue#141
Fixed a bug in the no-use_cache case related to the fix for #138 - issue#141
2015-03-09 01:43:57 +09:00
d06b6d7d41 Fixed a bug in the no-use_cache case related to the fix for #138 - issue#141 2015-03-08 16:41:14 +00:00
e66e5d1dfc Merge pull request #138 from s3fs-fuse/issue#97
Fixed bugs: use_cache not turned off and trying to load to the end - issue#97
2015-03-04 17:52:22 +09:00
114966e7c0 Fixed bugs: use_cache not turned off and trying to load to the end - issue#97 2015-03-04 08:48:37 +00:00
d2246297bd Merge pull request #137 from andrewgaul/integration-test-mpu
Add test for multi-part upload
2015-03-04 12:21:22 +09:00
8ec5decbce Merge pull request #136 from andrewgaul/integration-test-fixups
Small fixes to integration tests
2015-03-04 12:20:17 +09:00
0f7d77d599 Small fixes to integration tests
Use S3Proxy pid instead of self pid, ensure correct passwd
permissions, and use fusermount instead of umount so that non-root can
run tests.
2015-03-03 01:42:03 -08:00
699e3b3d79 Add test for multi-part upload 2015-03-02 17:17:30 -08:00
2f8ad7ace8 Merge pull request #135 from andrewgaul/mpu-v4
Correct V4 signature for initiate multipart upload
2015-03-01 22:57:10 +09:00
6b6567ec9b Merge pull request #134 from andrewgaul/mpu-v2
Include Content-Type in complete MPU V2 signature
2015-03-01 22:50:54 +09:00
c8c71650eb Merge pull request #131 from kahing/test-ls
Test ls
2015-03-01 22:47:55 +09:00
a07e804f57 Include Content-Type in complete MPU V2 signature
Previously this failed with SignatureDoesNotMatch since the headers
included it but the signature did not.  Fixes #125.
2015-02-28 18:03:21 -08:00
e9656810e3 Correct V4 signature for initiate multipart upload
Query parameters need a trailing = for V4 signatures.  Send correct
content-sha256 although Amazon does not seem to enforce this for
zero-length bodies.  Finally remove a stale comment.  Fixes #133.
2015-02-28 17:50:06 -08:00
4ee32d7559 test ls after creating files and dirs 2015-02-27 10:55:25 -08:00
53083202ba Merge pull request #132 from andrewgaul/s3proxy-integration-test
Use S3Proxy to run integration tests
2015-02-27 00:17:46 +09:00
574a48f81f Merge pull request #130 from kahing/refactor-integration-test
refactor integration tests create/cleanup file
2015-02-27 00:06:23 +09:00
1b1cf2d4bd Merge pull request #124 from timuralp/bug/fix_fallback_v2
Fallback to v2 signatures correctly.
2015-02-27 00:02:12 +09:00
e811ae1104 Use s3proxy to run integration tests
References #129.
2015-02-24 12:08:22 -08:00
d65bf4128d refactor integration tests create/cleanup file 2015-02-23 12:08:14 -08:00
be5735edb8 Fallback to v2 signatures correctly.
Missing parameter to SetSignatureV4() call in the fallback code path
results in not actually falling back.
2015-02-16 17:35:09 -08:00
5bf2b46fa3 Merge pull request #119 from s3fs-fuse/issue#107
Added new mp_umask option about issue#107, pr#110
2015-02-08 02:19:54 +09:00
cf2b0cca22 Added new mp_umask option about issue#107, pr#110 2015-02-07 17:16:45 +00:00
4ae5043534 Merge pull request #116 from s3fs-fuse/dev_sv4
Supported signature version 4
2015-02-03 01:43:48 +09:00
1424f87754 Supported signature version 4 for GnuTLS/NSS and automatically set endpoint/sigv2 2015-02-02 16:36:08 +00:00
4f953f9bd7 Clean codes for signature v4 and added new sigv2 option 2015-01-28 17:13:11 +00:00
0d2f3e2dc4 Fixed bugs, segfault and signature error at listing. 2015-01-24 16:36:30 +00:00
bb1f1d3faa Merged manually from caxapniy/s3fs-fuse/tree/1.77v4merge for signature v4 - #102 2015-01-20 16:31:36 +00:00
98daf16681 Merge pull request #104 from kahing/rename_before_close
fix rename before close
2015-01-14 00:40:41 +09:00
939ba2b4b3 Merge pull request #101 from adobos/directory_empty_optimization
Optimized function "bool directory_empty()"
2015-01-14 00:21:47 +09:00
d0b82428d5 Merge pull request #100 from adobos/dns_ssl_switch_bugfix
CURL handles not properly initialized to use DNS or SSL session caching.
2015-01-14 00:11:46 +09:00
902911765e Merge pull request #93 from andrewgaul/unit-test
Add simple unit tests for trim functions
2015-01-14 00:07:01 +09:00
03d84a07d1 fix rename before close
nautilus does this when you drag and drop to overwrite a file:

1) create .goutputstream-XXXXXX to write to
2) fsync the fd for .goutputstream-XXXXXX
3) rename .goutputstream-XXXXXX to target file
4) close the fd for .goutputstream-XXXXXX

previously, doing this on s3fs would result in an empty target file
because after the rename, s3fs would not flush the content of
.goutputstream-XXXXXX to target file.

this change moves the FdEntity from the old path to the new path
whenever rename happens. On flush s3fs would now flush the correct
content to the rename target.
2015-01-12 15:05:54 -08:00
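For reference, the sequence above as a minimal, self-contained sketch (file names follow the nautilus pattern; error handling omitted):
```
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    // 1) create the temporary file and write to it
    int fd = open(".goutputstream-XXXXXX", O_CREAT | O_WRONLY, 0644);
    write(fd, "data", 4);
    // 2) fsync the fd for the temporary file
    fsync(fd);
    // 3) rename while the fd is still open
    rename(".goutputstream-XXXXXX", "target");
    // 4) close; the pending content must now be flushed to "target"
    close(fd);
    return 0;
}
```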
1f686d93ff Merge pull request #103 from s3fs-fuse/issue#87
Remove prefix option in s3fs man page - issue#87
2015-01-06 23:49:11 +09:00
d95b9ef1ac Remove prefix option in s3fs man page - issue#87 2015-01-06 14:43:19 +00:00
045f1e7906 CURL handles were not properly initialized to use DNS caching, or SSL session caching. 2014-12-23 22:31:54 -08:00
69ef7fbefb Optimized function directory_empty: check for at most one entry when evaluating whether a directory is empty or not (as opposed to doing full directory listing) 2014-12-23 22:29:13 -08:00
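A hedged sketch of the idea behind this optimization (illustrative only, not the actual s3fs request code): ask the server for at most one key under the directory prefix instead of a full listing.
```
#include <string>

// Illustrative C++: build a ListObjects query that returns at most one key
// under "prefix"; the directory is treated as empty iff no key comes back.
// (URI encoding and the exact parameters s3fs sends are omitted/assumed.)
std::string empty_check_query(const std::string& prefix)
{
    return "delimiter=/&max-keys=1&prefix=" + prefix;
}
```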
a56b8db410 Add simple unit tests for trim functions
Subsequent commits will use this infrastructure.  Also reparent
prepare_url which relies on unrelated bucket, foreground2, and
pathrequeststyle symbols.
2014-12-06 18:07:14 -08:00
082eb24c12 Merge pull request #83 from tmwong2003/develop
Changed option processing to use strtol() to get a umask
2014-11-16 23:49:24 +09:00
f04b659f5e Changed option processing to use strtol() to get a umask
get_mode()/s3fs_strtoofft() does not handle octal umask values, which
results in unexpected behavior when trying to set a world-readable umask
value.
2014-11-12 23:29:41 +00:00
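The gist of the fix, as a hedged sketch (function name illustrative): parse the option as octal with strtol so values like "0022" keep their meaning.
```
#include <stdlib.h>
#include <sys/types.h>

// base 8: "022" parses as octal 022 (a world-readable umask), where a
// decimal-only converter would misread it.
mode_t parse_umask_option(const char* value)
{
    return (mode_t)strtol(value, NULL, 8);
}
```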
eedc621637 Merge pull request #79 from buptUnixGuys/master
Update curl.cpp
2014-11-09 00:05:04 +09:00
b31ec5c4af Update curl.cpp
The space causes a signature mismatch when using an "ahbe_conf" file to add additional headers. When S3 uses the "x-amz-" headers to calculate the signature, the format is as follows:
PUT

application/octet-stream
Wed, 05 Nov 2014 03:05:08 GMT
x-amz-acl:private
x-amz-meta-gid:0
x-amz-meta-mode:33188
x-amz-meta-mtime:1415156708
x-amz-meta-uid:0
There is no space after the colon.
2014-11-05 11:28:33 +08:00
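Illustrative sketch of the canonicalization rule this commit relies on (not the s3fs code): each x-amz-* header contributes "name:value" with no space after the colon, so a stray space in the header sent on the wire breaks the match.
```
#include <map>
#include <string>

// std::map iterates in sorted key order, as V2 canonicalization requires.
std::string canonicalized_amz_headers(const std::map<std::string, std::string>& amz)
{
    std::string out;
    for(std::map<std::string, std::string>::const_iterator iter = amz.begin(); iter != amz.end(); ++iter){
        out += iter->first + ":" + iter->second + "\n";  // e.g. "x-amz-acl:private"
    }
    return out;
}
```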
651e8c3158 Merge pull request #64 from andrewgaul/failed-read-eio
Return EIO on failed read
2014-11-03 01:03:32 +09:00
77d4d066b5 Merge pull request #74 from vincentbernat/fix/url-may-omit-scheme
url: handle scheme omission
2014-10-26 16:18:03 +09:00
1e97e99aa0 Merge pull request #73 from vincentbernat/fix/git-ignore
Small gitignore fixes
2014-10-26 16:12:44 +09:00
7212072ff0 url: handle scheme omission
When the scheme is omitted in URL overriding (for example `example.com`
instead of `https://example.com`), s3fs is modifying the URL by
inserting `s3.` in the middle of the name  (`examples3..com`).

This can be a bit difficult to troubleshoot and curl seems to handle
scheme-less requests just fine. So, just handle this case correctly.
2014-10-23 10:25:17 +02:00
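A sketch of the failure mode (illustrative, not the literal s3fs code): inserting the bucket host after the scheme separator only works when a scheme is actually present, so scheme-less overrides must be handled explicitly.
```
#include <string>

// With url = "https://example.com" this yields "https://mybucket.example.com".
// Code that assumed "://" was always present mangled scheme-less input.
std::string virtual_host_url(const std::string& url, const std::string& bucket)
{
    std::string::size_type pos  = url.find("://");
    std::string::size_type host = (std::string::npos == pos) ? 0 : pos + 3;
    return url.substr(0, host) + bucket + "." + url.substr(host);
}
```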
8bcab645e1 gitignore: add test-driver and compile
Those are generated by latest versions of autotools.
2014-10-23 10:02:59 +02:00
9013917d58 gitignore: use absolute path
The current content of `.gitignore` is using relative paths. For
example, `test/config.log` would be ignored while it doesn't seem to be
the intent. Use absolute paths. They are still relative to the root of
the repository.
2014-10-23 10:01:08 +02:00
1eddf92c35 Merge pull request #72 from s3fs-fuse/issue#68
Fixed #68(FreeBSD issue)
2014-10-22 23:30:32 +09:00
28d82c9ccd Fixed #68(FreeBSD issue) 2014-10-22 14:21:01 +00:00
2f90a04513 Merge pull request #71 from s3fs-fuse/issue#68
Fixed for #68(FreeBSD issue)
2014-10-21 23:58:20 +09:00
2724728476 Merge pull request #70 from s3fs-fuse/master
Fixed for #68(FreeBSD issue)
2014-10-21 23:56:41 +09:00
ed8f424c1a Merge pull request #69 from andrewgaul/always-true
Address clang always true warnings
2014-10-21 23:53:30 +09:00
50137fe026 Address clang always true warnings 2014-10-16 23:34:12 -07:00
9237d07226 Merge pull request #63 from jollyroger/spelling
Fix spelling errors
2014-10-13 11:38:13 +09:00
8c2be4aa85 Merge pull request #62 from jollyroger/fix-stray-chars
Remove stray chars from source files
2014-10-13 11:34:52 +09:00
ccaed9a91c Merge pull request #60 from andrewgaul/check-bucket-disable-fail-on-error
Emit user-friendly log messages on failed CheckBucket requests
2014-10-13 11:33:17 +09:00
a1ca8b7124 Return EIO on failed read
Previously S3fsMultiCurl::MultiRead did not report read errors since
it did not treat failed callback setup as a fatal operation error.
Failed callback setups usually result from exceeding the number of
allowed retries.  Previously cp did not report an error during a
network outage but now does:

$ cp ~/s3-path/s3-file .
cp: error reading ‘/home/gaul/s3-path/s3-file’: Input/output error
cp: failed to extend ‘./s3-file’: Input/output error
2014-10-03 21:30:11 -07:00
6633366218 Fix spelling errors 2014-10-01 13:42:39 +03:00
22ea65f02c Remove stray chars from source files 2014-10-01 13:20:29 +03:00
3d69ee0c30 Emit response on failed CheckBucket requests
This allows callers to diagnose errors like InvalidAccessKeyId and
RequestTimeTooSkewed.
2014-09-28 16:12:53 -07:00
c88a5f38be Disable CURLOPT_FAILONERROR for CheckBucket
curl will not consume the body of a response when CURLOPT_FAILONERROR
is set.  This prevents logging of responses for failed requests.
2014-09-28 16:12:43 -07:00
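A self-contained illustration of the libcurl behavior described above (the bucket URL is an example):
```
#include <cstdio>
#include <curl/curl.h>

int main(void)
{
    CURL* curl = curl_easy_init();
    if(!curl){
        return 1;
    }
    curl_easy_setopt(curl, CURLOPT_URL, "https://mybucket.s3.amazonaws.com/");
    // CURLOPT_FAILONERROR is left at its default (0L): even on a 403, the XML
    // error body (e.g. InvalidAccessKeyId) is written out and can be logged.
    CURLcode res = curl_easy_perform(curl);
    long status = 0;
    curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status);
    printf("HTTP %ld (curl result %d)\n", status, (int)res);
    curl_easy_cleanup(curl);
    return 0;
}
```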
38e6857824 Merge pull request #56 from s3fs-fuse/version1.78
version number increment.
2014-09-15 22:30:51 +09:00
ca72b9a6d0 version number increment. 2014-09-15 13:26:35 +00:00
44 changed files with 8292 additions and 2839 deletions

.github/ISSUE_TEMPLATE.md

@ -0,0 +1,27 @@
#### Additional Information
_The following information is very important in order to help us to help you. Omitting the following details may delay your support request or cause it to receive no attention at all._
- Version of s3fs being used (s3fs --version)
- _example: 1.0_
- Version of fuse being used (pkg-config --modversion fuse)
- _example: 2.9.4_
- System information (uname -a)
- _command result: uname -a_
- Distro (cat /etc/issue)
- _command result: result_
- s3fs command line used (if applicable)
```
```
- /etc/fstab entry (if applicable):
```
```
- s3fs syslog messages (grep s3fs /var/log/syslog, or s3fs outputs)
_if you execute s3fs with the dbglevel or curldbg option, you can get detailed debug messages_
```
```
#### Details about issue

.github/PULL_REQUEST_TEMPLATE.md

@ -0,0 +1,5 @@
#### Relevant Issue (if applicable)
_If there are Issues related to this PullRequest, please list them._
#### Details
_Please describe the details of PullRequest._

.gitignore

@ -1,21 +1,31 @@
*.o
Makefile
Makefile.in
aclocal.m4
autom4te.cache/
config.guess
config.log
config.status
config.sub
configure
depcomp
doc/Makefile
doc/Makefile.in
install-sh
missing
src/.deps/
src/Makefile
src/Makefile.in
src/s3fs
test/Makefile
test/Makefile.in
/Makefile
/Makefile.in
/aclocal.m4
/autom4te.cache/
/config.guess
/config.log
/config.status
/config.sub
/stamp-h1
/config.h
/config.h.in
/config.h.in~
/configure
/depcomp
/test-driver
/compile
/doc/Makefile
/doc/Makefile.in
/install-sh
/missing
/src/.deps/
/src/Makefile
/src/Makefile.in
/src/s3fs
/src/test_*
/test/.deps/
/test/Makefile
/test/Makefile.in
/test/*.log
/default_commit_hash

.mailmap

@ -0,0 +1,7 @@
Adrian Petrescu <apetresc@df820570-a93a-0410-bd06-b72b767a4274>
Adrian Petrescu <apetresc@gmail.com@df820570-a93a-0410-bd06-b72b767a4274>
Ben Lemasurier <ben.lemasurier@gmail.com@df820570-a93a-0410-bd06-b72b767a4274>
Dan Moore <mooredan@suncup.net@df820570-a93a-0410-bd06-b72b767a4274>
Randy Rizun <rrizun@df820570-a93a-0410-bd06-b72b767a4274>
Randy Rizun <rrizun@rrizun-ThinkPad-T530.(none)>
Takeshi Nakatani <ggtakec@gmail.com@df820570-a93a-0410-bd06-b72b767a4274>

.travis.yml

@ -0,0 +1,17 @@
language: cpp
sudo: required
dist: trusty
cache: apt
before_install:
- sudo apt-get update -qq
- sudo apt-get install -qq cppcheck libfuse-dev openjdk-7-jdk
- sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
script:
- ./autogen.sh
- ./configure
- make
- make cppcheck
- make check -C src
- modprobe fuse
- make check -C test
- cat test/test-suite.log

ChangeLog

@ -1,6 +1,234 @@
ChangeLog for S3FS
------------------
Version 1.81 -- May 13, 2017
#426 - Updated to correct ChangeLog
#431 - fix typo s/controll/control/
#432 - Include location constraint when creating bucket
#433 - Correct search and replace typo
#440 - Handled all curl error without exiting process - #437
#443 - Fix for leaks during stat cache entry expiry / truncation (#340)
#444 - Add mirror file logic for removing cache file
#447 - added fuse package for mounting via /etc/fstab, fixes #417
#449 - Accept mount options compatible with mtab
#451 - Correct path in README
#454 - Changed for accepting mount options compatible with mtab - #449
#466 - Fixed a bug about could not copy file mode from org file
#471 - Added use_xattr option for #467 and #460
#477 - OS-specific correspondence of the extended attribute header
#483 - Trim symbolic link original path in file
#487 - Split header debugging onto multiple lines for easier reading
#488 - Fixed searching Content-Length without case sensitive - #480
#489 - Changed headers_t map using nocase compare function - #488
#494 - Fix typo s/destroied/destroyed/
#495 - Fix invalid V4 signature on multipart copy requests
#498 - Upgrade to S3Proxy 1.5.1
#502 - Fixed issue#435 branch codes for remaining bugs(2)
#503 - Add missing call to mtime test
#504 - Use describe helper function
#505 - Correct typos
#509 - Use server-provided ETag during complete upload
#511 - Fixed a bug about uploading NULL to some part of the file contents
#512 - Changed clock_gettime func to s3fs_clock_gettime for homebrew - #468
#513 - Added issue and PR templates.
#517 - Update s3fs.1 - removed duplicated word
#520 - Added links for eventual consistency in README.md - #515
#539 - Upgrade to S3Proxy 1.5.2
#540 - Address cppcheck 1.77 warnings
#545 - Changed base cached time of stat_cache_expire option - #523
#546 - Fixed double initialization of SSL library at foreground
#550 - Add umount instruction for unprivileged user
#551 - Updated stat_cache_expire option description - #545
#552 - switch S3fsMultiCurl to use foreground threads
#553 - add TLS cipher suites customization
#554 - cleanup cache directory when running out of disk space
#555 - don't sign empty headers (as they are discarded)
#556 - fix multipart upload handling without cache
#557 - Added check_cache_dir_exist option(refixed #347) - #538
#558 - Fixed a bug in logic about truncating stat cache
#560 - Fixed about multipart uploading at no free space related to #509
#567 - Do not send ACL unless overridden
#576 - Added option for complementing lack of stat mode
#578 - Refactored the get_object_attribute function
#579 - Added notsup_compat_dir option
#580 - Enhanced bucket/path parameter check
#582 - Check errors returned in 200 OK responses for put header request
#583 - Updated limit object size in s3fs man page
#585 - Fixed failure to upload/copy with SSE_C and SSE_KMS
#587 - Changed copyright year format for debian pkg
#588 - Default transport to HTTPS
#590 - Updated man page for default_acl option - #567
#593 - Backward compatible for changing default transport to HTTPS
#594 - Check bucket at public bucket and add nocopyapi option automatically
Version 1.80 -- May 29, 2016
#213 - Parse ETag from copy multipart correctly
#215 - Fix mem leak in openssl_auth.cpp:s3fs_sha256hexsum
#217 - Override install, so that the make install does not install rename_before_close under /test
#219 - Address Coverity errors
#220 - Test removing a non-empty directory
#221 - Compare idiomatically
#222 - Annotate constructors as explicit
#224 - Configure cppcheck
#229 - Convert rename_before_close to a shell script
#231 - Rewrite AutoLock
#232 - Always hold stat_cache_lock when using stat_cache
#233 - Remove IntToStr
#234 - Update README
#235 - Plug leak during complete multipart upload
#237 - Refactor tests into individual functions
#238 - Enable all cppcheck rules
#239 - Update stale Google Code reference in --help
#240 - Enable Content-MD5 during multipart upload part
#243 - Run cppcheck during Travis builds
#245 - Elide duplicate lookups of std::map via iterators
#246 - Unlock during early return in TruncateCache
#247 - Base64 cleanup
#248 - Enable integration tests for Travis
#249 - Silence wget
#250 - s3fs can print version with short commit hash - #228
#251 - Skip xattr tests if utilities are missing
#252 - This fixes an issue with caching when the creation of a subdirectory …
#253 - Added checking cache dir perms at starting.
#256 - Add no atomic rename to limitations
#257 - Update README.md: Bugfix password file permissions errors
#258 - Update README.md to better explain mount upon boot
#260 - Wrap help text at 80 characters
#261 - Correct help timeouts
#263 - Allow integration testing against Amazon S3
#265 - Fix integration tests
#266 - Cleanup from PR #265
#267 - Added the _netdev option to the fstab example.
#268 - Use 127.0.0.1 not localhost in s3proxy wait loop
#271 - Add support for standard_ia storage class
#274 - Modified man page for storage_class option(#271)
#275 - Changed and cleaned the logic for debug message.
#278 - Supported for SSE KMS(#270)
#280 - Supported a object which is larger than free disk space
#285 - Add test for symlink
#288 - Fixed a bug about head request(copy) for SSE - issue#286
#289 - Print source file in log messages
#291 - File opened with O_TRUNC is not flushed - Issue #290
#293 - Fix a small spelling issue.
#295 - File opened with O_TRUNC is not flushed - changed #291
#300 - Update integration-test-main.sh
#302 - Fix syslog level used by S3FS_PRN_EXIT()
#304 - Fixed a bug about mtime - #299
#306 - Fix read concurrency to work in parallel count
#307 - Fix pthread portability problem
#308 - Changed ensure free disk space as additional change for #306
#309 - Check pthread prtability in configure as additional change for #307
#310 - Update integration-test-main.sh as additional change for #300
#311 - Change error log to debug log in s3fs_read()
#313 - fix gitignore
#319 - Clean up mount point on errors in s3fs_init()
#321 - delete stat cache entry in s3fs_fsync so st_size is refreshed - #320
#323 - Add goofys to references
#328 - Fix v4 signature with use_path_request_style
#329 - Correct multiple issues with GET and v4 signing
#330 - Pass by const reference where possible
#331 - Address various clang warnings
#334 - Bucket host should include port and not path
#336 - update README.md for fstab
#338 - Fixed a bug about IAMCRED type could not be retried.
#339 - Updated README.md for fstab example.
#341 - Fix the memory leak issue in fdcache.
#346 - Fix empty directory check against AWS S3
#348 - Integration test summary, continue on error
#350 - Changed cache out logic for stat - #340
#351 - Check cache directory path and attributes - #347
#352 - Remove stat file cache dir if specified del_cache - #337
#354 - Supported regex type for additional header format - #343
#355 - Fixed codes about clock_gettime for osx
#356 - Fixed codes about clock_gettime for osx(2)
#357 - Fixed codes about clock_gettime for osx(3)
#359 - Remove optional parameter from Content-Type header - #358
#360 - Fix clock_gettime autotools detection on Linux
#364 - Checked content-type by no case-sensitivity - #363
#371 - Always set stats cache for opened file
#372 - Fixed a bug about etag comparison in stats cache, etc.
#376 - Test for writing after an lseek past end of file
#379 - Fixed a bug about writing sparsed file - #375
#385 - fix typo in curl.cpp: s/returing/returning/
#391 - Update s3fs.1
#394 - Revert "Fixed a bug about writing sparsed file - #375"
#395 - Fixed writing sparsed file - #375,#379,#394
#397 - Supported User-Agent header - #383
#403 - Fix a bug of truncating empty file
#404 - Add curl handler pool to reuse connections
#409 - Fixed 'load_sse_c' option not working - #388
#410 - Allow duplicate key in ahbe_conf - #386
#411 - loading IAM role name automatically(iam_role option) - #387
#415 - Fixed a bug about stat_cache_expire - #382
#420 - Skip early credential checks when iam_role=auto
#422 - Fixes for iam_role=auto
#424 - Added travis CI badge in README.md
#425 - Updated ChangeLog and configure.ac for release 1.80
Version 1.79 -- Jul 19, 2015
issue #60 - Emit user-friendly log messages on failed CheckBucket requests
issue #62 - Remove stray chars from source files
issue #63 - Fix spelling errors
issue #68 - FreeBSD issue
issue #69 - Address clang always true warnings
issue #73 - Small gitignore fixes
issue #74 - url: handle scheme omission
issue #83 - Changed option processing to use strtol() to get a umask
issue #93 - Add simple unit tests for trim functions
issue #100 - CURL handles not properly initialized to use DNS or SSL session caching
issue #101 - Optimized function "bool directory_empty()"
issue #103 - Remove prefix option in s3fs man page - issue#87
issue #104 - fix rename before close
issue #116 - Supported signature version 4
issue #119 - Added new mp_umask option about issue#107, pr#110
issue #124 - Fallback to v2 signatures correctly.
issue #130 - refactor integration tests create/cleanup file
issue #131 - Test ls
issue #132 - Use S3Proxy to run integration tests
issue #134 - Include Content-Type in complete MPU V2 signature
issue #135 - Correct V4 signature for initiate multipart upload
issue #136 - Small fixes to integration tests
issue #137 - Add test for multi-part upload
issue #138 - Fixed bugs, not turn use_cache off and try to load to end - issue#97
issue #143 - Fixed a bug no use_cache case about fixed #138 - issue#141
issue #144 - Add Travis configuration
issue #146 - add exit handler to cleanup on failures
issue #147 - Use S3Proxy 1.4.0-SNAPSHOT
issue #150 - Fixed a bug not handling fsync - #145
issue #154 - Fixed url-encoding for ampersand etc on sigv4 - Improvement/#149
issue #155 - Fixed a bug: unable to mount bucket subdirectory
issue #156 - Fixed a bug about ssl session sharing with libcurl older than 7.23.0 - issue#126
issue #159 - Upgrade to S3Proxy 1.4.0
issue #164 - send the correct Host header when using -o url
issue #165 - Auth v4 refactor
issue #167 - Increased default connecting/reading/writing timeout value
issue #168 - switch to use region specific endpoints to compute correct v4 signature
issue #170 - Reviewed and fixed response codes print in curl.cpp - #157
issue #171 - Support buckets with mixed-case names
issue #173 - Run integration tests via Travis
issue #176 - configure.ac: detect target, if target is darwin (OSX), then #176
issue #177 - Add .mailmap
issue #178 - Update .gitignore
issue #184 - Add usage information for multipart_size
issue #185 - Correct obvious typos in usage and README
issue #190 - Add a no_check_certificate option.
issue #194 - Tilda in a file-name breaks things (EPERM)
issue #198 - Disable integration tests for Travis
issue #199 - Supported extended attributes(retry)
issue #200 - fixed fallback to sigv2 for bucket create and GCS
issue #202 - Specialize {set,get}xattr for OS X
issue #204 - Add integration test for xattr
issue #207 - Fixed a few small spelling issues.
Version 1.78 -- Sep 15, 2014
issue #29 - Possible to create Debian/Ubuntu packages?(googlecode issue 109)
issue 417(googlecode) - Password file with DOS format is not handled properly
issue #41 - Failed making signature
issue #40 - Moving a directory containing more than 1000 files truncates the directory
issue #49 - use_sse is ignored when creating new files
issue #39 - Support for SSE-C
issue #50 - Cannot find pkg-config when configured with any SSL backend except openssl
Version 1.77 -- Apr 19, 2014
issue 405(googlecode) - enable_content_md5 Input/output error
issue #14 - s3fs -u should return 0 if there are no lost multiparts

Makefile.am

@ -19,7 +19,7 @@
######################################################################
SUBDIRS=src test doc
EXTRA_DIST=doc
EXTRA_DIST=doc default_commit_hash
dist-hook:
rm -rf `find $(distdir)/doc -type d -name .svn`
@ -28,3 +28,14 @@ dist-hook:
release : dist
	../utils/release.sh $(DIST_ARCHIVES)
cppcheck:
cppcheck --quiet --error-exitcode=1 \
--inline-suppr \
--std=c++03 \
-U CURLE_PEER_FAILED_VERIFICATION \
-U P_tmpdir \
--enable=all \
--suppress=missingIncludeSystem \
--suppress=unusedFunction \
--suppress=variableScope \
src/ test/

NEWS

README

@ -1,67 +0,0 @@
THIS README CONTAINS OUTDATED INFORMATION - please refer to the wiki or --help
S3FS-Fuse
S3FS is a FUSE (File System in User Space) based solution to mount/unmount Amazon S3 storage buckets and use system commands with S3 just as if it were another hard disk.
In order to compile s3fs, you'll need the following requirements:
* Kernel-devel packages (or kernel source) installed that is the SAME version of your running kernel
* LibXML2-devel packages
* CURL-devel packages (or compile curl from sources at: curl.haxx.se/ use 7.15.X)
* GCC, GCC-C++
* pkgconfig
* FUSE (>= 2.8.4)
* FUSE Kernel module installed and running (RHEL 4.x/CentOS 4.x users - read below)
* OpenSSL-devel (0.9.8)
GnuTLS(gcrypt and nettle)
NSS
* Git
If you're using YUM or APT to install those packages, additional dependencies might be required; allow them to be installed.
Downloading & Compiling:
------------------------
In order to download s3fs, download from following url:
https://github.com/s3fs-fuse/s3fs-fuse/archive/master.zip
Or clone the repository with the following command:
git clone git://github.com/s3fs-fuse/s3fs-fuse.git
Go inside the directory that has been created (s3fs-fuse) and run: ./autogen.sh
This will generate a number of scripts in the project directory, including a configure script which you should run with: ./configure
If configure succeeded, you can now run: make. If it didn't, make sure you meet the dependencies above.
This should compile the code. If everything goes OK, you'll be greeted with "ok!" at the end and you'll have a binary file called "s3fs"
in the src/ directory.
As root (you can use su, su -, sudo) do: "make install" - this will copy the "s3fs" binary to /usr/local/bin.
Congratulations. S3fs is now compiled and installed.
Usage:
------
In order to use s3fs, make sure you have the Access Key and the Secret Key handy. (refer to the wiki)
First, create a directory where to mount the S3 bucket you want to use.
Example (as root): mkdir -p /mnt/s3
Then run: s3fs mybucket[:path] /mnt/s3
This will mount your bucket to /mnt/s3. You can do a simple "ls -l /mnt/s3" to see the content of your bucket.
If you want to allow other people on the same machine to access the same bucket, you can add "-o allow_other" so they can read/write/delete content of the bucket.
You can add a fixed mount point in /etc/fstab, here's an example:
s3fs#mybucket /mnt/s3 fuse allow_other 0 0
This will mount upon reboot (or by launching: mount -a) your bucket on your machine.
If that does not work, probably you should specify with "_netdev" option in fstab.
All other options can be read at: https://github.com/s3fs-fuse/s3fs-fuse/wiki/Fuse-Over-Amazon
Known Issues:
-------------
s3fs should be working fine with S3 storage. However, there are a couple of limitations:
* Currently s3fs could hang the CPU if you have lots of time-outs. This is *NOT* a fault of s3fs but rather libcurl. This happens when you try to copy thousands of files in one session; it doesn't happen when you upload hundreds of files or less.
* CentOS 4.x/RHEL 4.x users - if you use the kernel that shipped with your distribution and didn't upgrade to the latest kernel RedHat/CentOS provides, you might have a problem loading the "fuse" kernel module. Please upgrade to the latest kernel (2.6.16 or above) and make sure the "fuse" kernel module is compiled and loadable, since FUSE requires this kernel module and s3fs requires it as well.
* Moving/renaming/erasing files takes time since the whole file needs to be accessed first. A workaround could be to use s3fs's cache support with the use_cache option.

README.md

@ -0,0 +1,128 @@
s3fs
====
s3fs allows Linux and Mac OS X to mount an S3 bucket via FUSE.
s3fs preserves the native object format for files, allowing use of other tools like [s3cmd](http://s3tools.org/s3cmd).
[![Build Status](https://travis-ci.org/s3fs-fuse/s3fs-fuse.svg?branch=master)](https://travis-ci.org/s3fs-fuse/s3fs-fuse)
Features
--------
* large subset of POSIX including reading/writing files, directories, symlinks, mode, uid/gid, and extended attributes
* compatible with Amazon S3, Google Cloud Storage, and other S3-based object stores
* large files via multi-part upload
* renames via server-side copy
* optional server-side encryption
* data integrity via MD5 hashes
* in-memory metadata caching
* local disk data caching
* user-specified regions, including Amazon GovCloud
* authenticate via v2 or v4 signatures
Installation
------------
Ensure you have all the dependencies:
On Ubuntu 14.04:
```
sudo apt-get install automake autotools-dev g++ git libcurl4-gnutls-dev libfuse-dev libssl-dev libxml2-dev make pkg-config
```
On CentOS 7:
```
sudo yum install automake fuse fuse-devel gcc-c++ git libcurl-devel libxml2-devel make openssl-devel
```
Compile from master via the following commands:
```
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
cd s3fs-fuse
./autogen.sh
./configure
make
sudo make install
```
Examples
--------
Enter your S3 identity and credential in a file `/path/to/passwd`:
```
echo MYIDENTITY:MYCREDENTIAL > /path/to/passwd
```
Make sure the file `/path/to/passwd` has proper permissions (if you get a 'permissions' error when mounting):
```
chmod 600 /path/to/passwd
```
Run s3fs with an existing bucket `mybucket` and directory `/path/to/mountpoint`:
```
s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd
```
If you encounter any errors, enable debug output:
```
s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd -d -d -f -o f2 -o curldbg
```
You can also mount on boot by adding the following line to `/etc/fstab`:
```
s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other 0 0
or
mybucket /path/to/mountpoint fuse.s3fs _netdev,allow_other 0 0
```
Note: You may also want to create the global credential file first
```
echo MYIDENTITY:MYCREDENTIAL > /etc/passwd-s3fs
chmod 600 /etc/passwd-s3fs
```
Note2: You may also need to make sure the `netfs` service is started on boot
Limitations
-----------
Generally S3 cannot offer the same performance or semantics as a local file system. More specifically:
* random writes or appends to files require rewriting the entire file
* metadata operations such as listing directories have poor performance due to network latency
* [eventual consistency](https://en.wikipedia.org/wiki/Eventual_consistency) can temporarily yield stale data ([Amazon S3 Data Consistency Model](http://docs.aws.amazon.com/AmazonS3/latest/dev/Introduction.html#ConsistencyModel))
* no atomic renames of files or directories
* no coordination between multiple clients mounting the same bucket
* no hard links
References
----------
* [goofys](https://github.com/kahing/goofys) - similar to s3fs but has better performance and less POSIX compatibility
* [s3backer](https://github.com/archiecobbs/s3backer) - mount an S3 bucket as a single file
* [s3fs-python](https://fedorahosted.org/s3fs/) - an older and less complete implementation written in Python
* [S3Proxy](https://github.com/andrewgaul/s3proxy) - combine with s3fs to mount EMC Atmos, Microsoft Azure, and OpenStack Swift buckets
* [s3ql](https://bitbucket.org/nikratio/s3ql/) - similar to s3fs but uses its own object format
* [YAS3FS](https://github.com/danilop/yas3fs) - similar to s3fs but uses SNS to allow multiple clients to mount a bucket
Frequently Asked Questions
--------------------------
* [FAQ wiki page](https://github.com/s3fs-fuse/s3fs-fuse/wiki/FAQ)
License
-------
Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>
Licensed under the GNU GPL version 2

autogen.sh

@ -19,6 +19,28 @@
#
# See the file ChangeLog for a revision history.
echo "--- Make commit hash file -------"
SHORTHASH="unknown"
type git > /dev/null 2>&1
if [ $? -eq 0 -a -d .git ]; then
RESULT=`git rev-parse --short HEAD`
if [ $? -eq 0 ]; then
SHORTHASH=${RESULT}
fi
fi
echo ${SHORTHASH} > default_commit_hash
echo "--- Finished commit hash file ---"
echo "--- Start autotools -------------"
aclocal \
&& autoheader \
&& automake --add-missing \
&& autoconf
echo "--- Finished autotools ----------"
exit 0

configure.ac

@ -20,16 +20,36 @@
dnl Process this file with autoconf to produce a configure script.
AC_PREREQ(2.59)
AC_INIT(s3fs, 1.77)
AC_INIT(s3fs, 1.81)
AC_CONFIG_HEADER([config.h])
AC_CANONICAL_SYSTEM
AM_INIT_AUTOMAKE()
AM_INIT_AUTOMAKE([foreign])
AC_PROG_CXX
AC_PROG_CC
AC_CHECK_HEADERS([sys/xattr.h])
AC_CHECK_HEADERS([attr/xattr.h])
AC_CHECK_HEADERS([sys/extattr.h])
CXXFLAGS="$CXXFLAGS -Wall -D_FILE_OFFSET_BITS=64"
dnl ----------------------------------------------
dnl For OSX
dnl ----------------------------------------------
case "$target" in
*-darwin* )
# Do something specific for mac
min_fuse_version=2.7.3
;;
*)
# Default Case
# assume other supported linux system
min_fuse_version=2.8.4
;;
esac
dnl ----------------------------------------------
dnl Choice SSL library
dnl ----------------------------------------------
@ -156,13 +176,13 @@ dnl
dnl For PKG_CONFIG before checking nss/gnutls.
dnl this is redundant checking, but we need checking before following.
dnl
PKG_CHECK_MODULES([common_lib_checking], [fuse >= 2.8.4 libcurl >= 7.0 libxml-2.0 >= 2.6])
PKG_CHECK_MODULES([common_lib_checking], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6])
AC_MSG_CHECKING([compile s3fs with])
case "${auth_lib}" in
openssl)
AC_MSG_RESULT(OpenSSL)
PKG_CHECK_MODULES([DEPS], [fuse >= 2.8.4 libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9])
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9])
;;
gnutls)
AC_MSG_RESULT(GnuTLS-gcrypt)
@ -171,7 +191,7 @@ gnutls)
AS_IF([test "$gnutls_nettle" = ""], [AC_CHECK_LIB(gcrypt, gcry_control, [gnutls_nettle=0])])
AS_IF([test $gnutls_nettle = 0],
[
PKG_CHECK_MODULES([DEPS], [fuse >= 2.8.4 libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])
LIBS="-lgnutls -lgcrypt $LIBS"
AC_MSG_CHECKING([gnutls is build with])
AC_MSG_RESULT(gcrypt)
@ -185,7 +205,7 @@ nettle)
AS_IF([test "$gnutls_nettle" = ""], [AC_CHECK_LIB(nettle, nettle_MD5Init, [gnutls_nettle=1])])
AS_IF([test $gnutls_nettle = 1],
[
PKG_CHECK_MODULES([DEPS], [fuse >= 2.8.4 libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])
LIBS="-lgnutls -lnettle $LIBS"
AC_MSG_CHECKING([gnutls is build with])
AC_MSG_RESULT(nettle)
@ -194,7 +214,7 @@ nettle)
;;
nss)
AC_MSG_RESULT(NSS)
PKG_CHECK_MODULES([DEPS], [fuse >= 2.8.4 libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])
;;
*)
AC_MSG_ERROR([unknown ssl library type.])
@ -207,12 +227,67 @@ AM_CONDITIONAL([USE_GNUTLS_NETTLE], [test "$auth_lib" = nettle])
AM_CONDITIONAL([USE_SSL_NSS], [test "$auth_lib" = nss])
dnl ----------------------------------------------
dnl end of ssl library
dnl check functions
dnl ----------------------------------------------
dnl malloc_trim function
AC_CHECK_FUNCS(malloc_trim, , )
AC_CHECK_FUNCS([malloc_trim])
dnl clock_gettime function(osx)
AC_SEARCH_LIBS([clock_gettime],[rt posix4])
AC_CHECK_FUNCS([clock_gettime])
dnl ----------------------------------------------
dnl check symbols/macros/enums
dnl ----------------------------------------------
dnl PTHREAD_MUTEX_RECURSIVE
AC_MSG_CHECKING([pthread mutex recursive])
AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM([[#include <pthread.h>]],
[[int i = PTHREAD_MUTEX_RECURSIVE;]])
],
[AC_DEFINE(S3FS_MUTEX_RECURSIVE, PTHREAD_MUTEX_RECURSIVE, [Define if you have PTHREAD_MUTEX_RECURSIVE])
AC_MSG_RESULT(PTHREAD_MUTEX_RECURSIVE)
],
[AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM([[#include <pthread.h>]],
[[int i = PTHREAD_MUTEX_RECURSIVE_NP;]])
],
[AC_DEFINE(S3FS_MUTEX_RECURSIVE, PTHREAD_MUTEX_RECURSIVE_NP, [Define if you have PTHREAD_MUTEX_RECURSIVE_NP])
AC_MSG_RESULT(PTHREAD_MUTEX_RECURSIVE_NP)
],
[AC_MSG_ERROR([do not have PTHREAD_MUTEX_RECURSIVE symbol])])
]
)
dnl ----------------------------------------------
dnl output files
dnl ----------------------------------------------
AC_CONFIG_FILES(Makefile src/Makefile test/Makefile doc/Makefile)
dnl ----------------------------------------------
dnl short commit hash
dnl ----------------------------------------------
AC_CHECK_PROG([GITCMD], [git --version], [yes], [no])
AC_CHECK_FILE([.git], [DOTGITDIR=yes], [DOTGITDIR=no])
AC_MSG_CHECKING([github short commit hash])
if test "x${GITCMD}" = "xyes" -a "x${DOTGITDIR}" = "xyes"; then
GITCOMMITHASH=`git rev-parse --short HEAD`
elif test -f default_commit_hash; then
GITCOMMITHASH=`cat default_commit_hash`
else
GITCOMMITHASH="unknown"
fi
AC_MSG_RESULT([${GITCOMMITHASH}])
AC_DEFINE_UNQUOTED([COMMIT_HASH_VAL], ["${GITCOMMITHASH}"], [short commit hash value on github])
dnl ----------------------------------------------
dnl put
dnl ----------------------------------------------
AC_OUTPUT
dnl ----------------------------------------------
dnl end configuration
dnl ----------------------------------------------

doc/man/s3fs.1

@ -5,12 +5,18 @@ S3FS \- FUSE-based file system backed by Amazon S3
.SS mounting
.TP
\fBs3fs bucket[:/path] mountpoint \fP [options]
.TP
\fBs3fs mountpoint \fP [options(must specify bucket= option)]
.SS unmounting
.TP
\fBumount mountpoint
For root.
.TP
\fBfusermount -u mountpoint
For unprivileged user.
.SS utility mode ( remove interrupted multipart uploading objects )
.TP
\fBs3fs -u bucket
\fBs3fs \-u bucket
.SH DESCRIPTION
s3fs is a FUSE filesystem that allows you to mount an Amazon S3 bucket as a local filesystem. It stores files natively and transparently in S3 (i.e., you can use other programs to access the same files).
.SH AUTHENTICATION
@ -48,13 +54,13 @@ FUSE singlethreaded option (disables multi-threaded operation)
All s3fs options must be given in the form where "opt" is:
<option_name>=<option_value>
.TP
\fB\-o\fR default_acl (default="private")
the default canned acl to apply to all written S3 objects, e.g., "public-read".
Any created files will have this canned acl.
Any updated files will also have this canned acl applied!
\fB\-o\fR bucket
if the bucket name (and path) is not specified on the command line, you must specify this option after \-o to give the bucket name.
.TP
\fB\-o\fR prefix (default="") (coming soon!)
a prefix to append to all S3 objects.
\fB\-o\fR default_acl (default="private")
the default canned acl to apply to all written s3 objects, e.g., "private", "public-read".
an empty string means that the ACL header is not sent.
see http://aws.amazon.com/documentation/s3/ for the full list of canned acls.
.TP
\fB\-o\fR retries (default="2")
number of times to retry a failed S3 transaction.
@ -62,22 +68,48 @@ number of times to retry a failed S3 transaction.
\fB\-o\fR use_cache (default="" which means disabled)
local folder to use for local file cache.
.TP
\fB\-o\fR check_cache_dir_exist (default is disable)
If use_cache is set, check whether the cache directory exists.
If this option is not specified, the cache directory will be created at runtime if it does not exist.
.TP
\fB\-o\fR del_cache - delete local file cache
delete local file cache when s3fs starts and exits.
.TP
\fB\-o\fR storage_class (default is standard)
store object with specified storage class.
this option replaces the old option use_rrs.
Possible values: standard, standard_ia, and reduced_redundancy.
.TP
\fB\-o\fR use_rrs (default is disable)
use Amazon's Reduced Redundancy Storage.
this option can not be specified with use_sse.
(can specify use_rrs=1 for old version)
this option has been replaced by new storage_class option.
.TP
\fB\-o\fR use_sse (default is disable)
use Amazon's Server-Side Encryption or Server-Side Encryption with Customer-Provided Encryption Keys.
this option can not be specified with use_rrs. specifying only "use_sse" or "use_sse=1" enables Server-Side Encryption.(use_sse=1 for old version)
specifying this option with file path which has some SSE-C secret key enables Server-Side Encryption with Customer-Provided Encryption Keys.(use_sse=file)
the file must be 600 permission. the file can have some lines, each line is one SSE-C key. the first line in file is used as Customer-Provided Encryption Keys for uploading and change headers etc.
if there are some keys after first line, those are used downloading object which are encripted by not first key.
so that, you can keep all SSE-C keys in file, that is SSE-C key history.
if AWSSSECKEYS environment is set, you can set SSE-C key instead of this option.
Specify one of three types of Amazon's Server-Side Encryption: SSE-S3, SSE-C or SSE-KMS. SSE-S3 uses Amazon S3-managed encryption keys, SSE-C uses customer-provided encryption keys, and SSE-KMS uses the master key which you manage in AWS KMS.
Specifying "use_sse" or "use_sse=1" enables the SSE-S3 type (use_sse=1 is the old-style parameter).
For SSE-C, you can specify "use_sse=custom", "use_sse=custom:<custom key file path>" or "use_sse=<custom key file path>" (specifying only <custom key file path> is the old-style parameter).
You can use "c" as an abbreviation for "custom".
The custom key file must have 600 permissions. The file can have several lines; each line is one SSE-C key.
The first line in the file is used as the Customer-Provided Encryption Key for uploading and changing headers etc.
If there are keys after the first line, they are used for downloading objects that were encrypted with a key other than the first.
This way you can keep all SSE-C keys in the file as an SSE-C key history.
If you specify "custom" ("c") without a file path, you need to set the custom key via the load_sse_c option or the AWSSSECKEYS environment variable (AWSSSECKEYS holds several SSE-C keys separated by ":").
This option is used to decide the SSE type.
So if you do not want to encrypt objects at upload but need to decrypt encrypted objects at download, you can use the load_sse_c option instead of this option.
For SSE-KMS, specify "use_sse=kmsid" or "use_sse=kmsid:<kms id>".
You can use "k" as an abbreviation for "kmsid".
If you specify the SSE-KMS type with your <kms id> in AWS KMS, set it after "kmsid:" (or "k:").
If you specify only "kmsid" ("k"), you need to set the AWSSSEKMSID environment variable to the <kms id> value.
Be careful: you cannot use a KMS id that is not in the same region as the EC2 instance.
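For illustration, the three forms as command lines (mount point, key file path, and KMS key id are examples only):
```
s3fs mybucket /path/to/mountpoint -o use_sse                             # SSE-S3
s3fs mybucket /path/to/mountpoint -o use_sse=custom:/path/to/ssec.keys   # SSE-C
s3fs mybucket /path/to/mountpoint -o use_sse=kmsid:my-kms-key-id         # SSE-KMS
```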
.TP
\fB\-o\fR load_sse_c - specify SSE-C keys
Specify the custom-provided encryption keys file path for decrypting at downloading.
If you use the custom-provided encryption key at uploading, you specify with "use_sse=custom".
The file can have several lines; each line is one custom key.
This way you can keep all SSE-C keys in the file as an SSE-C key history.
The AWSSSECKEYS environment variable holds the same content as this file.
.TP
\fB\-o\fR passwd_file (default="")
specify the path to the password file, which takes precedence over the password in $HOME/.passwd-s3fs and /etc/passwd-s3fs
@ -86,34 +118,40 @@ specify the path to the password file, which which takes precedence over the pas
This option specifies the path to a configuration file that defines additional HTTP headers by file (object) extension.
The configuration file format is below:
-----------
line = [file suffix] HTTP-header [HTTP-values]
file suffix = file(object) suffix, if this field is empty, it means "*"(all object).
line = [file suffix or regex] HTTP-header [HTTP-values]
file suffix = file(object) suffix, if this field is empty, it means "reg:(.*)".(=all object).
regex = regular expression to match the file(object) path. this type starts with "reg:" prefix.
HTTP-header = additional HTTP header name
HTTP-values = additional HTTP header value
-----------
Sample:
-----------
.gz Content-Encoding gzip
.Z Content-Encoding compress
X-S3FS-MYHTTPHEAD myvalue
.gz Content-Encoding gzip
.Z Content-Encoding compress
reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2
-----------
A sample configuration file is provided in the "test" directory.
If you use this option to set the "Content-Encoding" HTTP header, please take care of RFC 2616.
.TP
\fB\-o\fR public_bucket (default="" which means disabled)
anonymously mount a public bucket when set to 1, ignores the $HOME/.passwd-s3fs and /etc/passwd-s3fs files.
S3 does not allow the copy object API for anonymous users, so s3fs sets the nocopyapi option automatically when the public_bucket=1 option is specified.
.TP
\fB\-o\fR connect_timeout (default="10" seconds)
\fB\-o\fR connect_timeout (default="300" seconds)
time to wait for connection before giving up.
.TP
\fB\-o\fR readwrite_timeout (default="30" seconds)
\fB\-o\fR readwrite_timeout (default="60" seconds)
time to wait between read/write activity before giving up.
.TP
\fB\-o\fR max_stat_cache_size (default="1000" entries (about 4MB))
maximum number of entries in the stat cache
.TP
\fB\-o\fR stat_cache_expire (default is no expire)
specify expire time(seconds) for entries in the stat cache
specify expire time (seconds) for entries in the stat cache. This expire time is measured from when the entry was cached.
.TP
\fB\-o\fR stat_cache_interval_expire (default is no expire)
specify expire time (seconds) for entries in the stat cache. This expire time is based on the time since the last access of the stat cache entry.
This option is exclusive with stat_cache_expire, and is left for compatibility with older versions.
.TP
\fB\-o\fR enable_noobj_cache (default is disable)
enable cache entries for the object which does not exist.
@ -121,6 +159,10 @@ s3fs always has to check whether file(or sub directory) exists under object(path
This increases ListBucket requests and hurts performance.
You can specify this option for performance; s3fs then memorizes in the stat cache that the object (file or directory) does not exist.
.TP
\fB\-o\fR no_check_certificate (by default this option is disabled)
do not check ssl certificate.
server certificate won't be checked against the available certificate authorities.
.TP
\fB\-o\fR nodnscache - disable dns cache.
s3fs always uses the DNS cache; this option disables the DNS cache.
.TP
@ -135,22 +177,39 @@ number of parallel request for uploading big objects.
s3fs uploads large object(default:over 20MB) by multipart post request, and sends parallel requests.
This option limits parallel request count which s3fs requests at once.
It is necessary to set this value depending on the CPU and the network bandwidth.
This option is lated to fd_page_size option and affects it.
.TP
\fB\-o\fR fd_page_size(default="52428800"(50MB))
number of internal management page size for each file discriptor.
For delayed reading and writing by s3fs, s3fs manages pages which is separated from object. Each pages has a status that data is already loaded(or not loaded yet).
This option should not be changed when you don't have a trouble with performance.
This value is changed automatically by parallel_count and multipart_size values(fd_page_size value = parallel_count * multipart_size).
.TP
\fB\-o\fR multipart_size(default="10"(10MB))
size of one part in a multipart upload request.
The default size is 10MB(10485760byte), this value is minimum size.
Specify number of MB and over 10(MB).
This option is lated to fd_page_size option and affects it.
The default size is 10MB (10485760 bytes); the minimum value is 5MB (5242880 bytes).
Specify a number of MB, 5 (MB) or more.
.TP
\fB\-o\fR url (default="http://s3.amazonaws.com")
sets the url to use to access Amazon S3. If you want to use HTTPS, then you can set url=https://s3.amazonaws.com
\fB\-o\fR ensure_diskfree (default is the same as the multipart_size value)
sets the MB of disk space to keep free. This option is the threshold of free disk space that s3fs uses for its cache files.
s3fs creates files for downloading, uploading and caching.
If the free disk space is smaller than this value, s3fs uses as little disk space as possible, in exchange for performance.
.TP
\fB\-o\fR url (default="https://s3.amazonaws.com")
sets the url to use to access Amazon S3. If you want to use HTTP, then you can set "url=http://s3.amazonaws.com".
If you start s3fs without specifying the url option, s3fs will check the bucket using https://s3.amazonaws.com.
When the bucket check fails, s3fs retries the bucket check using http://s3.amazonaws.com.
This behavior is kept for backward compatibility.
If you do not use HTTPS, please specify the URL with the url option.
.TP
\fB\-o\fR endpoint (default="us-east-1")
sets the endpoint to use.
If this option is not specified, s3fs uses "us-east-1" region as the default.
If s3fs cannot connect to the region specified by this option, it will not run.
But if you do not specify this option and cannot connect to the default region, s3fs will automatically retry connecting to another region.
s3fs can learn the correct region name because the S3 server reports it in the error response.
.TP
\fB\-o\fR sigv2 (default is signature version 4)
sets signing AWS requests by using Signature Version 2.
.TP
\fB\-o\fR mp_umask (default is "0000")
sets umask for the mount point directory.
If allow_other option is not set, s3fs allows access to the mount point only to the owner.
If allow_other is set, s3fs allows access to all users by default.
If you set allow_other together with this option, you can control the permissions of the mount point with this option, like a umask.
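For example (illustrative values):
```
s3fs mybucket /path/to/mountpoint -o allow_other,mp_umask=022   # mount point appears as rwxr-xr-x
```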
.TP
\fB\-o\fR nomultipart - disable multipart uploads
.TP
@ -160,11 +219,17 @@ Enable sending the "Content-MD5" header when uploading an object without multipart post request.
If this option is enabled, it has some influence on s3fs performance when uploading small objects.
Because s3fs always checks the MD5 when uploading large objects, this option does not affect large objects.
.TP
\fB\-o\fR iam_role ( default is no role )
set the IAM Role that will supply the credentials from the instance meta-data.
\fB\-o\fR iam_role ( default is no IAM role )
This option requires the IAM role name or "auto". If you specify "auto", s3fs will automatically use the IAM role name that is set on the instance. If you specify this option without any argument, it is the same as specifying "auto".
.TP
\fB\-o\fR noxmlns - disable registing xml name space.
disable registing xml name space for response of ListBucketResult and ListVersionsResult etc. Default name space is looked up from "http://s3.amazonaws.com/doc/2006-03-01".
\fB\-o\fR use_xattr ( default is not handling the extended attribute )
Enable handling of extended attributes (xattrs).
If you set this option, you can use extended attributes.
For example, encfs and ecryptfs need extended attribute support.
Notice: if s3fs handles extended attributes, copying with preserve=mode will not work.
.TP
\fB\-o\fR noxmlns - disable registering xml name space.
disable registering xml name space for response of ListBucketResult and ListVersionsResult etc. Default name space is looked up from "http://s3.amazonaws.com/doc/2006-03-01".
This option should not be specified now, because s3fs looks up xmlns automatically after v1.66.
.TP
\fB\-o\fR nocopyapi - for other incomplete compatibility object storage.
@ -174,18 +239,50 @@ If you set this option, s3fs do not use PUT with "x-amz-copy-source"(copy api).
\fB\-o\fR norenameapi - for other incomplete compatibility object storage.
For a distributed object storage which is compatibility S3 API without PUT(copy api).
This option is a subset of nocopyapi option. The nocopyapi option does not use copy-api for all command(ex. chmod, chown, touch, mv, etc), but this option does not use copy-api for only rename command(ex. mv).
If this option is specified with nocopapi, the s3fs ignores it.
If this option is specified with nocopyapi, then s3fs ignores it.
.TP
\fB\-o\fR use_path_request_style (use legacy API calling style)
Enble compatibility with S3-like APIs which do not support the virtual-host request style, by using the older path request style.
Enable compatibility with S3-like APIs which do not support the virtual-host request style, by using the older path request style.
.TP
\fB\-o\fR noua (suppress User-Agent header)
Usually s3fs outputs of the User-Agent in "s3fs/<version> (commit hash <hash>; <using ssl library name>)" format.
If this option is specified, s3fs suppresses the output of the User-Agent.
.TP
\fB\-o\fR cipher_suites
Customize TLS cipher suite list. Expects a colon separated list of cipher suite names.
A list of available cipher suites, depending on your TLS engine, can be found on the CURL library documentation:
https://curl.haxx.se/docs/ssl-ciphers.html
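An illustrative example (available suite names depend on your TLS engine):
```
s3fs mybucket /path/to/mountpoint -o cipher_suites=ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384
```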
.TP
\fB\-o\fR complement_stat (complement lack of file/directory mode)
s3fs complements the lack of file/directory mode information if a file or directory object does not have the x-amz-meta-mode header.
By default, s3fs does not complement stat information for such an object, so the object will not be allowed to be listed/modified.
.TP
\fB\-o\fR notsup_compat_dir (not support compatibility directory types)
As a default, s3fs supports objects of the directory type as much as possible and recognizes them as directories.
Objects that can be recognized as directory objects are "dir/", "dir", "dir_$folder$", and a file object whose path contains a directory component for which no directory object exists.
s3fs needs redundant communication to support all these directory types.
The object as the directory created by s3fs is "dir/".
By restricting s3fs to recognize only "dir/" as a directory, communication traffic can be reduced.
This option is used to give this restriction to s3fs.
However, if there is a directory object other than "dir/" in the bucket, specifying this option is not recommended.
s3fs may not be able to recognize objects correctly if objects not created by s3fs exist in the bucket.
Please use this option only when every directory object in the bucket is of the "dir/" type.
.TP
\fB\-o\fR dbglevel (default="crit")
Set the debug message level. Set the value to crit (critical), err (error), warn (warning), or info (information). The default debug level is critical.
If s3fs is run with the "-d" option, the debug level is set to information.
When s3fs catches the SIGUSR2 signal, the debug level is bumped up.
.TP
\fB\-o\fR curldbg - put curl debug message
Output debug messages from libcurl when this option is specified.
.SH FUSE/MOUNT OPTIONS
.TP
Most of the generic mount options described in 'man mount' are supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime, noatime, sync async, dirsync). Filesystems are mounted with '-onodev,nosuid' by default, which can only be overridden by a privileged user.
Most of the generic mount options described in 'man mount' are supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime, noatime, sync async, dirsync). Filesystems are mounted with '\-onodev,nosuid' by default, which can only be overridden by a privileged user.
.TP
There are many FUSE-specific mount options that can be specified, e.g. allow_other. See the FUSE README for the full set.
.SH NOTES
.TP
Maximum file size=64GB (limited by s3fs, not Amazon).
The maximum size of objects that s3fs can handle depends on Amazon S3. For example, up to 5 GB when using the single PUT API, and up to 5 TB when the Multipart Upload API is used.
.TP
If enabled via the "use_cache" option, s3fs automatically maintains a local cache of files in the folder specified by use_cache. Whenever s3fs needs to read or write a file on S3, it first downloads the entire file locally to the folder specified by use_cache and operates on it. When fuse_release() is called, s3fs will re-upload the file to S3 if it has been changed. s3fs uses md5 checksums to minimize downloads from S3.
.TP

src/Makefile.am

@ -24,7 +24,7 @@ if USE_GNUTLS_NETTLE
AM_CPPFLAGS += -DUSE_GNUTLS_NETTLE
endif
s3fs_SOURCES = s3fs.cpp s3fs.h curl.cpp curl.h cache.cpp cache.h string_util.cpp string_util.h s3fs_util.cpp s3fs_util.h fdcache.cpp fdcache.h common_auth.cpp s3fs_auth.h common.h
s3fs_SOURCES = s3fs.cpp s3fs.h curl.cpp curl.h cache.cpp cache.h string_util.cpp string_util.h s3fs_util.cpp s3fs_util.h fdcache.cpp fdcache.h common_auth.cpp s3fs_auth.h addhead.cpp addhead.h common.h
if USE_SSL_OPENSSL
s3fs_SOURCES += openssl_auth.cpp
endif
@ -37,3 +37,8 @@ endif
s3fs_LDADD = $(DEPS_LIBS)
noinst_PROGRAMS = test_string_util
test_string_util_SOURCES = string_util.cpp test_string_util.cpp test_util.h
TESTS = test_string_util

src/addhead.cpp

@ -0,0 +1,286 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <assert.h>
#include <curl/curl.h>
#include <sstream>
#include <fstream>
#include <string>
#include <map>
#include <list>
#include <vector>
#include "common.h"
#include "addhead.h"
#include "curl.h"
#include "s3fs.h"
using namespace std;
//-------------------------------------------------------------------
// Symbols
//-------------------------------------------------------------------
#define ADD_HEAD_REGEX "reg:"
//-------------------------------------------------------------------
// Class AdditionalHeader
//-------------------------------------------------------------------
AdditionalHeader AdditionalHeader::singleton;
//-------------------------------------------------------------------
// Class AdditionalHeader method
//-------------------------------------------------------------------
AdditionalHeader::AdditionalHeader()
{
if(this == AdditionalHeader::get()){
is_enable = false;
}else{
assert(false);
}
}
AdditionalHeader::~AdditionalHeader()
{
if(this == AdditionalHeader::get()){
Unload();
}else{
assert(false);
}
}
bool AdditionalHeader::Load(const char* file)
{
if(!file){
S3FS_PRN_WARN("file is NULL.");
return false;
}
Unload();
ifstream AH(file);
if(!AH.good()){
S3FS_PRN_WARN("Could not open file(%s).", file);
return false;
}
// read file
string line;
PADDHEAD paddhead;
while(getline(AH, line)){
if('#' == line[0]){
continue;
}
if(0 == line.size()){
continue;
}
// load a line
stringstream ss(line);
string key(""); // suffix(key)
string head; // additional HTTP header
string value; // header value
if(0 == isblank(line[0])){
ss >> key;
}
if(ss){
ss >> head;
if(ss && static_cast<size_t>(ss.tellg()) < line.size()){
value = line.substr(static_cast<int>(ss.tellg()) + 1);
}
}
// check it
if(0 == head.size()){
if(0 == key.size()){
continue;
}
S3FS_PRN_ERR("file format error: %s key(suffix) is no HTTP header value.", key.c_str());
Unload();
return false;
}
paddhead = new ADDHEAD;
if(0 == strncasecmp(key.c_str(), ADD_HEAD_REGEX, strlen(ADD_HEAD_REGEX))){
// regex
if(key.size() <= strlen(ADD_HEAD_REGEX)){
S3FS_PRN_ERR("file format error: %s key(suffix) does not have key string.", key.c_str());
continue;
}
key = key.substr(strlen(ADD_HEAD_REGEX));
// compile
regex_t* preg = new regex_t;
int result;
char errbuf[256];
if(0 != (result = regcomp(preg, key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info
regerror(result, preg, errbuf, sizeof(errbuf));
S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf);
delete preg;
delete paddhead;
continue;
}
// set
paddhead->pregex = preg;
paddhead->basestring = key;
paddhead->headkey = head;
paddhead->headvalue = value;
}else{
// not regex, directly comparing
paddhead->pregex = NULL;
paddhead->basestring = key;
paddhead->headkey = head;
paddhead->headvalue = value;
}
// add list
addheadlist.push_back(paddhead);
// set flag
if(!is_enable){
is_enable = true;
}
}
return true;
}
void AdditionalHeader::Unload(void)
{
is_enable = false;
for(addheadlist_t::iterator iter = addheadlist.begin(); iter != addheadlist.end(); iter = addheadlist.erase(iter)){
PADDHEAD paddhead = *iter;
if(paddhead){
if(paddhead->pregex){
regfree(paddhead->pregex);
delete paddhead->pregex;
}
delete paddhead;
}
}
}
bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const
{
if(!is_enable){
return true;
}
if(!path){
S3FS_PRN_WARN("path is NULL.");
return false;
}
size_t pathlength = strlen(path);
// loop
//
// [NOTE]
// Duplicate keys are allowed, so the entire list is scanned.
//
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
const PADDHEAD paddhead = *iter;
if(!paddhead){
continue;
}
if(paddhead->pregex){
// regex
regmatch_t match; // not used
if(0 == regexec(paddhead->pregex, path, 1, &match, 0)){
// match -> adding header
meta[paddhead->headkey] = paddhead->headvalue;
}
}else{
// directly comparing
if(paddhead->basestring.length() < pathlength){
if(0 == paddhead->basestring.length() || 0 == strcmp(&path[pathlength - paddhead->basestring.length()], paddhead->basestring.c_str())){
// match -> adding header
meta[paddhead->headkey] = paddhead->headvalue;
}
}
}
}
return true;
}
struct curl_slist* AdditionalHeader::AddHeader(struct curl_slist* list, const char* path) const
{
headers_t meta;
if(!AddHeader(meta, path)){
return list;
}
for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
// Adding header
list = curl_slist_sort_insert(list, iter->first.c_str(), iter->second.c_str());
}
meta.clear();
S3FS_MALLOCTRIM(0);
return list;
}
bool AdditionalHeader::Dump(void) const
{
if(!IS_S3FS_LOG_DBG()){
return true;
}
stringstream ssdbg;
int cnt = 1;
ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << endl;
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter, ++cnt){
const PADDHEAD paddhead = *iter;
ssdbg << " [" << cnt << "] = {" << endl;
if(paddhead){
if(paddhead->pregex){
ssdbg << " type\t\t--->\tregex" << endl;
}else{
ssdbg << " type\t\t--->\tsuffix matching" << endl;
}
ssdbg << " base string\t--->\t" << paddhead->basestring << endl;
ssdbg << " add header\t--->\t" << paddhead->headkey << ": " << paddhead->headvalue << endl;
}
ssdbg << " }" << endl;
}
ssdbg << "}" << endl;
// print all
S3FS_PRN_DBG("%s", ssdbg.str().c_str());
return true;
}
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
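// Illustrative sketch (not part of this commit): how the parser above is
// driven. Each non-comment line of the loaded file reads
//   "<suffix | reg:regex>  <Header-Key>  <value>"
// e.g. ".gz  Content-Encoding  gzip" or "reg:\.tmp$  Cache-Control  no-cache".
// The config path below is an invented example.
AdditionalHeader* ah = AdditionalHeader::get();
if(ah->Load("/etc/ahbe.conf")){
    headers_t meta;
    ah->AddHeader(meta, "/mybucket/data/file.gz");  // suffix ".gz" matches, header added
    ah->Dump();                                     // table is printed at debug level only
}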

src/addhead.h Normal file

@ -0,0 +1,70 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_ADDHEAD_H_
#define S3FS_ADDHEAD_H_
#include <regex.h>
//----------------------------------------------
// class AdditionalHeader
//----------------------------------------------
typedef struct add_header{
regex_t* pregex; // not NULL means using regex, NULL means comparing suffix directly.
std::string basestring;
std::string headkey;
std::string headvalue;
}ADDHEAD, *PADDHEAD;
typedef std::vector<PADDHEAD> addheadlist_t;
class AdditionalHeader
{
private:
static AdditionalHeader singleton;
bool is_enable;
addheadlist_t addheadlist;
protected:
AdditionalHeader();
~AdditionalHeader();
public:
// Reference singleton
static AdditionalHeader* get(void) { return &singleton; }
bool Load(const char* file);
void Unload(void);
bool AddHeader(headers_t& meta, const char* path) const;
struct curl_slist* AddHeader(struct curl_slist* list, const char* path) const;
bool Dump(void) const;
};
#endif // S3FS_ADDHEAD_H_
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/


@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -21,6 +21,9 @@
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#ifndef HAVE_CLOCK_GETTIME
#include <sys/time.h>
#endif
#include <unistd.h>
#include <stdint.h>
#include <pthread.h>
@ -29,15 +32,107 @@
#include <syslog.h>
#include <string>
#include <map>
#include <vector>
#include <algorithm>
#include <list>
#include "cache.h"
#include "s3fs.h"
#include "s3fs_util.h"
#include "string_util.h"
using namespace std;
//-------------------------------------------------------------------
// Utility
//-------------------------------------------------------------------
#ifndef CLOCK_REALTIME
#define CLOCK_REALTIME 0
#endif
#ifndef CLOCK_MONOTONIC
#define CLOCK_MONOTONIC CLOCK_REALTIME
#endif
#ifndef CLOCK_MONOTONIC_COARSE
#define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC
#endif
#ifdef HAVE_CLOCK_GETTIME
static int s3fs_clock_gettime(int clk_id, struct timespec* ts)
{
return clock_gettime(clk_id, ts);
}
#else
static int s3fs_clock_gettime(int clk_id, struct timespec* ts)
{
struct timeval now;
if(0 != gettimeofday(&now, NULL)){
return -1;
}
ts->tv_sec = now.tv_sec;
ts->tv_nsec = now.tv_usec * 1000;
return 0;
}
#endif
inline void SetStatCacheTime(struct timespec& ts)
{
if(-1 == s3fs_clock_gettime(CLOCK_MONOTONIC_COARSE, &ts)){
ts.tv_sec = time(NULL);
ts.tv_nsec = 0;
}
}
inline void InitStatCacheTime(struct timespec& ts)
{
ts.tv_sec = 0;
ts.tv_nsec = 0;
}
inline int CompareStatCacheTime(struct timespec& ts1, struct timespec& ts2)
{
// return -1: ts1 < ts2
// 0: ts1 == ts2
// 1: ts1 > ts2
if(ts1.tv_sec < ts2.tv_sec){
return -1;
}else if(ts1.tv_sec > ts2.tv_sec){
return 1;
}else{
if(ts1.tv_nsec < ts2.tv_nsec){
return -1;
}else if(ts1.tv_nsec > ts2.tv_nsec){
return 1;
}
}
return 0;
}
inline bool IsExpireStatCacheTime(const struct timespec& ts, const time_t& expire)
{
struct timespec nowts;
SetStatCacheTime(nowts);
return ((ts.tv_sec + expire) < nowts.tv_sec);
}
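// Illustrative sketch (not from this commit): how the helpers above compose.
// The 900-second expiry is an invented example value.
struct timespec stamped;
SetStatCacheTime(stamped);                      // "now" via CLOCK_MONOTONIC_COARSE when available
time_t expire = 900;
if(IsExpireStatCacheTime(stamped, expire)){
    // more than 900 seconds have passed; the entry should be refreshed or dropped
}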
//
// For cache out
//
typedef std::vector<stat_cache_t::iterator> statiterlist_t;
struct sort_statiterlist{
// ascending order
bool operator()(const stat_cache_t::iterator& src1, const stat_cache_t::iterator& src2) const
{
int result = CompareStatCacheTime(src1->second->cache_date, src2->second->cache_date);
if(0 == result){
if(src1->second->hit_count < src2->second->hit_count){
result = -1;
}
}
return (result < 0);
}
};
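// Illustrative sketch (assumption): the comparator sorts eviction candidates
// oldest-first, breaking timestamp ties by the lower hit count.
statiterlist_t candidates;                       // iterators into stat_cache
// ... collect candidate iterators ...
sort(candidates.begin(), candidates.end(), sort_statiterlist());
// candidates.front() is now the stalest, least-hit entry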
//-------------------------------------------------------------------
// Static
//-------------------------------------------------------------------
@ -47,7 +142,7 @@ pthread_mutex_t StatCache::stat_cache_lock;
//-------------------------------------------------------------------
// Constructor/Destructor
//-------------------------------------------------------------------
StatCache::StatCache() : IsExpireTime(false), ExpireTime(0), CacheSize(1000), IsCacheNoObject(false)
StatCache::StatCache() : IsExpireTime(false), IsExpireIntervalType(false), ExpireTime(0), CacheSize(1000), IsCacheNoObject(false)
{
if(this == StatCache::getStatCacheData()){
stat_cache.clear();
@ -87,19 +182,21 @@ time_t StatCache::GetExpireTime(void) const
return (IsExpireTime ? ExpireTime : (-1));
}
time_t StatCache::SetExpireTime(time_t expire)
time_t StatCache::SetExpireTime(time_t expire, bool is_interval)
{
time_t old = ExpireTime;
ExpireTime = expire;
IsExpireTime = true;
time_t old = ExpireTime;
ExpireTime = expire;
IsExpireTime = true;
IsExpireIntervalType = is_interval;
return old;
}
time_t StatCache::UnsetExpireTime(void)
{
time_t old = IsExpireTime ? ExpireTime : (-1);
ExpireTime = 0;
IsExpireTime = false;
time_t old = IsExpireTime ? ExpireTime : (-1);
ExpireTime = 0;
IsExpireTime = false;
IsExpireIntervalType = false;
return old;
}
@ -143,7 +240,7 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
if(iter != stat_cache.end() && (*iter).second){
stat_cache_entry* ent = (*iter).second;
if(!IsExpireTime|| (ent->cache_date + ExpireTime) >= time(NULL)){
if(!IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){
if(ent->noobjcache){
pthread_mutex_unlock(&StatCache::stat_cache_lock);
if(!IsCacheNoObject){
@ -155,19 +252,28 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
return false;
}
// hit without checking etag
string stretag;
if(petag){
string stretag = ent->meta["ETag"];
if('\0' != petag[0] && 0 != strcmp(petag, stretag.c_str())){
is_delete_cache = true;
// find & check ETag
for(headers_t::iterator iter = ent->meta.begin(); iter != ent->meta.end(); ++iter){
string tag = lower(iter->first);
if(tag == "etag"){
stretag = iter->second;
if('\0' != petag[0] && 0 != strcmp(petag, stretag.c_str())){
is_delete_cache = true;
}
break;
}
}
}
if(is_delete_cache){
// not hit by different ETag
DPRNNN("stat cache not hit by ETag[path=%s][time=%jd][hit count=%lu][ETag(%s)!=(%s)]",
strpath.c_str(), (intmax_t)(ent->cache_date), ent->hit_count, petag ? petag : "null", ent->meta["ETag"].c_str());
S3FS_PRN_DBG("stat cache not hit by ETag[path=%s][time=%jd.%09ld][hit count=%lu][ETag(%s)!=(%s)]",
strpath.c_str(), (intmax_t)(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count, petag ? petag : "null", stretag.c_str());
}else{
// hit
DPRNNN("stat cache hit [path=%s][time=%jd][hit count=%lu]", strpath.c_str(), (intmax_t)(ent->cache_date), ent->hit_count);
S3FS_PRN_DBG("stat cache hit [path=%s][time=%jd.%09ld][hit count=%lu]",
strpath.c_str(), (intmax_t)(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count);
if(pst!= NULL){
*pst= ent->stbuf;
@ -179,7 +285,10 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
(*pisforce) = ent->isforce;
}
ent->hit_count++;
ent->cache_date = time(NULL);
if(IsExpireIntervalType){
SetStatCacheTime(ent->cache_date);
}
pthread_mutex_unlock(&StatCache::stat_cache_lock);
return true;
}
@ -219,10 +328,10 @@ bool StatCache::IsNoObjectCache(string& key, bool overcheck)
}
if(iter != stat_cache.end() && (*iter).second) {
if(!IsExpireTime|| ((*iter).second->cache_date + ExpireTime) >= time(NULL)){
if(!IsExpireTime || !IsExpireStatCacheTime((*iter).second->cache_date, ExpireTime)){
if((*iter).second->noobjcache){
// noobjcache = true means no object.
(*iter).second->cache_date = time(NULL);
SetStatCacheTime((*iter).second->cache_date);
pthread_mutex_unlock(&StatCache::stat_cache_lock);
return true;
}
@ -239,17 +348,24 @@ bool StatCache::IsNoObjectCache(string& key, bool overcheck)
return false;
}
bool StatCache::AddStat(std::string& key, headers_t& meta, bool forcedir)
bool StatCache::AddStat(std::string& key, headers_t& meta, bool forcedir, bool no_truncate)
{
if(CacheSize< 1){
if(!no_truncate && CacheSize< 1){
return true;
}
DPRNNN("add stat cache entry[path=%s]", key.c_str());
S3FS_PRN_INFO3("add stat cache entry[path=%s]", key.c_str());
if(stat_cache.end() != stat_cache.find(key)){
pthread_mutex_lock(&StatCache::stat_cache_lock);
bool found = stat_cache.end() != stat_cache.find(key);
bool do_truncate = stat_cache.size() > CacheSize;
pthread_mutex_unlock(&StatCache::stat_cache_lock);
if(found){
DelStat(key.c_str());
}else{
if(stat_cache.size() > CacheSize){
if(do_truncate){
if(!TruncateCache()){
return false;
}
@ -263,35 +379,40 @@ bool StatCache::AddStat(std::string& key, headers_t& meta, bool forcedir)
return false;
}
ent->hit_count = 0;
ent->cache_date = time(NULL); // Set time.
ent->isforce = forcedir;
ent->noobjcache = false;
ent->notruncate = (no_truncate ? 1L : 0L);
ent->meta.clear();
SetStatCacheTime(ent->cache_date); // Set time.
//copy only some keys
for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
string tag = (*iter).first;
string value = (*iter).second;
if(tag == "Content-Type"){
ent->meta[tag] = value;
}else if(tag == "Content-Length"){
ent->meta[tag] = value;
}else if(tag == "ETag"){
ent->meta[tag] = value;
}else if(tag == "Last-Modified"){
ent->meta[tag] = value;
string tag = lower(iter->first);
string value = iter->second;
if(tag == "content-type"){
ent->meta[iter->first] = value;
}else if(tag == "content-length"){
ent->meta[iter->first] = value;
}else if(tag == "etag"){
ent->meta[iter->first] = value;
}else if(tag == "last-modified"){
ent->meta[iter->first] = value;
}else if(tag.substr(0, 5) == "x-amz"){
ent->meta[tag] = value;
}else{
// Check for upper case
transform(tag.begin(), tag.end(), tag.begin(), static_cast<int (*)(int)>(std::tolower));
if(tag.substr(0, 5) == "x-amz"){
ent->meta[tag] = value;
}
ent->meta[tag] = value; // key is lower case for "x-amz"
}
}
// add
pthread_mutex_lock(&StatCache::stat_cache_lock);
stat_cache_t::iterator iter = stat_cache.find(key); // recheck for same key exists
if(stat_cache.end() != iter){
if(iter->second){
delete iter->second;
}
stat_cache.erase(iter);
}
stat_cache[key] = ent;
pthread_mutex_unlock(&StatCache::stat_cache_lock);
return true;
@ -305,12 +426,19 @@ bool StatCache::AddNoObjectCache(string& key)
if(CacheSize < 1){
return true;
}
DPRNNN("add no object cache entry[path=%s]", key.c_str());
S3FS_PRN_INFO3("add no object cache entry[path=%s]", key.c_str());
if(stat_cache.end() != stat_cache.find(key)){
pthread_mutex_lock(&StatCache::stat_cache_lock);
bool found = stat_cache.end() != stat_cache.find(key);
bool do_truncate = stat_cache.size() > CacheSize;
pthread_mutex_unlock(&StatCache::stat_cache_lock);
if(found){
DelStat(key.c_str());
}else{
if(stat_cache.size() > CacheSize){
if(do_truncate){
if(!TruncateCache()){
return false;
}
@ -321,47 +449,109 @@ bool StatCache::AddNoObjectCache(string& key)
stat_cache_entry* ent = new stat_cache_entry();
memset(&(ent->stbuf), 0, sizeof(struct stat));
ent->hit_count = 0;
ent->cache_date = time(NULL); // Set time.
ent->isforce = false;
ent->noobjcache = true;
ent->notruncate = 0L;
ent->meta.clear();
SetStatCacheTime(ent->cache_date); // Set time.
// add
pthread_mutex_lock(&StatCache::stat_cache_lock);
stat_cache_t::iterator iter = stat_cache.find(key); // recheck for same key exists
if(stat_cache.end() != iter){
if(iter->second){
delete iter->second;
}
stat_cache.erase(iter);
}
stat_cache[key] = ent;
pthread_mutex_unlock(&StatCache::stat_cache_lock);
return true;
}
void StatCache::ChangeNoTruncateFlag(std::string key, bool no_truncate)
{
pthread_mutex_lock(&StatCache::stat_cache_lock);
stat_cache_t::iterator iter = stat_cache.find(key);
if(stat_cache.end() != iter){
stat_cache_entry* ent = iter->second;
if(ent){
if(no_truncate){
++(ent->notruncate);
}else{
if(0L < ent->notruncate){
--(ent->notruncate);
}
}
}
}
pthread_mutex_unlock(&StatCache::stat_cache_lock);
}
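// Hypothetical caller (path is an invented example): notruncate is kept as a
// reference count, so nested pin/unpin calls balance out.
StatCache* pcache = StatCache::getStatCacheData();
pcache->ChangeNoTruncateFlag("/bucket/big.bin", true);   // pin: ++notruncate
// ... long-running work that must keep this stat entry cached ...
pcache->ChangeNoTruncateFlag("/bucket/big.bin", false);  // unpin: --notruncate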
bool StatCache::TruncateCache(void)
{
if(0 == stat_cache.size()){
if(stat_cache.empty()){
return true;
}
pthread_mutex_lock(&StatCache::stat_cache_lock);
time_t lowest_time = time(NULL) + 1;
stat_cache_t::iterator iter_to_delete = stat_cache.end();
stat_cache_t::iterator iter;
for(iter = stat_cache.begin(); iter != stat_cache.end(); iter++) {
if((*iter).second){
if(lowest_time > (*iter).second->cache_date){
lowest_time = (*iter).second->cache_date;
iter_to_delete = iter;
// 1) erase over expire time
if(IsExpireTime){
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ){
stat_cache_entry* entry = iter->second;
if(!entry || (0L == entry->notruncate && IsExpireStatCacheTime(entry->cache_date, ExpireTime))){
if(entry){
delete entry;
}
stat_cache.erase(iter++);
}else{
++iter;
}
}
}
if(stat_cache.end() != iter_to_delete){
DPRNNN("truncate stat cache[path=%s]", (*iter_to_delete).first.c_str());
if((*iter_to_delete).second){
delete (*iter_to_delete).second;
}
stat_cache.erase(iter_to_delete);
S3FS_MALLOCTRIM(0);
// 2) check stat cache count
if(stat_cache.size() < CacheSize){
pthread_mutex_unlock(&StatCache::stat_cache_lock);
return true;
}
// 3) erase from the old cache in order
size_t erase_count= stat_cache.size() - CacheSize + 1;
statiterlist_t erase_iters;
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ++iter){
// check no truncate
stat_cache_entry* ent = iter->second;
if(ent && 0L < ent->notruncate){
// skip for no truncate entry
if(0 < erase_count){
--erase_count; // decrement
}
}else{
// iter does not have the notruncate flag
erase_iters.push_back(iter);
sort(erase_iters.begin(), erase_iters.end(), sort_statiterlist());
if(erase_count < erase_iters.size()){
erase_iters.pop_back();
}
}
}
for(statiterlist_t::iterator iiter = erase_iters.begin(); iiter != erase_iters.end(); ++iiter){
stat_cache_t::iterator siter = *iiter;
S3FS_PRN_DBG("truncate stat cache[path=%s]", siter->first.c_str());
if(siter->second){
delete siter->second;
}
stat_cache.erase(siter);
}
S3FS_MALLOCTRIM(0);
pthread_mutex_unlock(&StatCache::stat_cache_lock);
return true;
@ -372,7 +562,7 @@ bool StatCache::DelStat(const char* key)
if(!key){
return false;
}
DPRNNN("delete stat cache entry[path=%s]", key);
S3FS_PRN_INFO3("delete stat cache entry[path=%s]", key);
pthread_mutex_lock(&StatCache::stat_cache_lock);


@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -17,6 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_CACHE_H_
#define S3FS_CACHE_H_
@ -26,15 +27,18 @@
// Struct
//
struct stat_cache_entry {
struct stat stbuf;
unsigned long hit_count;
time_t cache_date;
headers_t meta;
bool isforce;
bool noobjcache; // Flag: cache is no object for no listing.
struct stat stbuf;
unsigned long hit_count;
struct timespec cache_date;
headers_t meta;
bool isforce;
bool noobjcache; // Flag: cache is no object for no listing.
unsigned long notruncate; // 0<: not remove automatically at checking truncate
stat_cache_entry() : hit_count(0), cache_date(0), isforce(false), noobjcache(false) {
stat_cache_entry() : hit_count(0), isforce(false), noobjcache(false), notruncate(0L) {
memset(&stbuf, 0, sizeof(struct stat));
cache_date.tv_sec = 0;
cache_date.tv_nsec = 0;
meta.clear();
}
};
@ -51,20 +55,21 @@ class StatCache
static pthread_mutex_t stat_cache_lock;
stat_cache_t stat_cache;
bool IsExpireTime;
bool IsExpireIntervalType; // if this flag is true, cache data is updated at last access time.
time_t ExpireTime;
unsigned long CacheSize;
bool IsCacheNoObject;
private:
StatCache();
~StatCache();
void Clear(void);
bool GetStat(std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce);
// Truncate stat cache
bool TruncateCache(void);
public:
StatCache();
~StatCache();
// Reference singleton
static StatCache* getStatCacheData(void) {
return &singleton;
@ -74,7 +79,7 @@ class StatCache
unsigned long GetCacheSize(void) const;
unsigned long SetCacheSize(unsigned long size);
time_t GetExpireTime(void) const;
time_t SetExpireTime(time_t expire);
time_t SetExpireTime(time_t expire, bool is_interval = false);
time_t UnsetExpireTime(void);
bool SetCacheNoObject(bool flag);
bool EnableCacheNoObject(void) {
@ -109,7 +114,10 @@ class StatCache
bool AddNoObjectCache(std::string& key);
// Add stat cache
bool AddStat(std::string& key, headers_t& meta, bool forcedir = false);
bool AddStat(std::string& key, headers_t& meta, bool forcedir = false, bool no_truncate = false);
// Change no truncate flag
void ChangeNoTruncateFlag(std::string key, bool no_truncate);
// Delete stat cache
bool DelStat(const char* key);


@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -17,83 +17,151 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_COMMON_H_
#define S3FS_COMMON_H_
#include "../config.h"
//
// Extended attribute
//
#ifdef HAVE_SYS_EXTATTR_H
#include <sys/extattr.h>
#elif HAVE_ATTR_XATTR_H
#include <attr/xattr.h>
#elif HAVE_SYS_XATTR_H
#include <sys/xattr.h>
#endif
//
// Macro
//
#define SAFESTRPTR(strptr) (strptr ? strptr : "")
// for debug
#define FPRINT_NEST_SPACE_0 ""
#define FPRINT_NEST_SPACE_1 " "
#define FPRINT_NEST_SPACE_2 " "
#define FPRINT_NEST_CHECK(NEST) \
(0 == NEST ? FPRINT_NEST_SPACE_0 : 1 == NEST ? FPRINT_NEST_SPACE_1 : FPRINT_NEST_SPACE_2)
//
// Debug level
//
enum s3fs_log_level{
S3FS_LOG_CRIT = 0, // LOG_CRIT
S3FS_LOG_ERR = 1, // LOG_ERR
S3FS_LOG_WARN = 3, // LOG_WARNING
S3FS_LOG_INFO = 7, // LOG_INFO
S3FS_LOG_DBG = 15 // LOG_DEBUG
};
#define LOWFPRINT(NEST, ...) \
printf("%s%s(%d): ", FPRINT_NEST_CHECK(NEST), __func__, __LINE__); \
printf(__VA_ARGS__); \
printf("\n"); \
//
// Debug macros
//
#define IS_S3FS_LOG_CRIT() (S3FS_LOG_CRIT == debug_level)
#define IS_S3FS_LOG_ERR() (S3FS_LOG_ERR == (debug_level & S3FS_LOG_DBG))
#define IS_S3FS_LOG_WARN() (S3FS_LOG_WARN == (debug_level & S3FS_LOG_DBG))
#define IS_S3FS_LOG_INFO() (S3FS_LOG_INFO == (debug_level & S3FS_LOG_DBG))
#define IS_S3FS_LOG_DBG() (S3FS_LOG_DBG == (debug_level & S3FS_LOG_DBG))
#define FPRINT(NEST, ...) \
if(foreground){ \
LOWFPRINT(NEST, __VA_ARGS__); \
}
#define S3FS_LOG_LEVEL_TO_SYSLOG(level) \
( S3FS_LOG_DBG == (level & S3FS_LOG_DBG) ? LOG_DEBUG : \
S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? LOG_INFO : \
S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? LOG_WARNING : \
S3FS_LOG_ERR == (level & S3FS_LOG_DBG) ? LOG_ERR : LOG_CRIT )
#define FPRINT2(NEST, ...) \
if(foreground2){ \
LOWFPRINT(NEST, __VA_ARGS__); \
}
#define S3FS_LOG_LEVEL_STRING(level) \
( S3FS_LOG_DBG == (level & S3FS_LOG_DBG) ? "[DBG] " : \
S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? "[INF] " : \
S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? "[WAN] " : \
S3FS_LOG_ERR == (level & S3FS_LOG_DBG) ? "[ERR] " : "[CRT] " )
#define LOWSYSLOGPRINT(LEVEL, ...) \
syslog(LEVEL, __VA_ARGS__);
#define S3FS_LOG_NEST_MAX 4
#define S3FS_LOG_NEST(nest) (nest < S3FS_LOG_NEST_MAX ? s3fs_log_nest[nest] : s3fs_log_nest[S3FS_LOG_NEST_MAX - 1])
#define SYSLOGPRINT(LEVEL, ...) \
if(LEVEL <= LOG_CRIT || debug){ \
LOWSYSLOGPRINT(LEVEL, __VA_ARGS__); \
}
#define S3FS_LOW_LOGPRN(level, fmt, ...) \
if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \
if(foreground){ \
fprintf(stdout, "%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), __FILE__, __func__, __LINE__, __VA_ARGS__); \
}else{ \
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s:%s(%d): " fmt "%s", __FILE__, __func__, __LINE__, __VA_ARGS__); \
} \
}
#define DPRINT(LEVEL, NEST, ...) \
FPRINT(NEST, __VA_ARGS__); \
SYSLOGPRINT(LEVEL, __VA_ARGS__);
#define S3FS_LOW_LOGPRN2(level, nest, fmt, ...) \
if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \
if(foreground){ \
fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), S3FS_LOG_NEST(nest), __FILE__, __func__, __LINE__, __VA_ARGS__); \
}else{ \
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s" fmt "%s", S3FS_LOG_NEST(nest), __VA_ARGS__); \
} \
}
#define DPRINT2(LEVEL, ...) \
FPRINT2(2, __VA_ARGS__); \
SYSLOGPRINT(LEVEL, __VA_ARGS__);
#define S3FS_LOW_LOGPRN_EXIT(fmt, ...) \
if(foreground){ \
fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \
}else{ \
fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "s3fs: " fmt "%s", __VA_ARGS__); \
}
// print debug message
#define FPRN(...) FPRINT(0, __VA_ARGS__)
#define FPRNN(...) FPRINT(1, __VA_ARGS__)
#define FPRNNN(...) FPRINT(2, __VA_ARGS__)
#define FPRNINFO(...) FPRINT2(2, __VA_ARGS__)
// print debug message with putting syslog
#define DPRNCRIT(...) DPRINT(LOG_CRIT, 0, __VA_ARGS__)
#define DPRN(...) DPRINT(LOG_ERR, 0, __VA_ARGS__)
#define DPRNN(...) DPRINT(LOG_DEBUG, 1, __VA_ARGS__)
#define DPRNNN(...) DPRINT(LOG_DEBUG, 2, __VA_ARGS__)
#define DPRNINFO(...) DPRINT2(LOG_INFO, __VA_ARGS__)
// [NOTE]
// small trick for VA_ARGS
//
#define S3FS_PRN_EXIT(fmt, ...) S3FS_LOW_LOGPRN_EXIT(fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CRIT(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_CRIT, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_ERR(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_ERR, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_WARN(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_WARN, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_DBG(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_DBG, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 0, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO0(fmt, ...) S3FS_LOG_INFO(fmt, __VA_ARGS__)
#define S3FS_PRN_INFO1(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 1, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO2(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 2, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO3(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 3, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CURL(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_CRIT, 0, fmt, ##__VA_ARGS__, "")
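// Illustrative note (not part of this commit): the trailing "" appended after
// ##__VA_ARGS__ pairs with the "%s" appended to fmt, so the format string and
// argument list stay balanced with or without variadic arguments.
//   S3FS_PRN_ERR("bucket not set");       // "" fills the trailing "%s"
//   S3FS_PRN_ERR("retry %d", retrycnt);   // "" is still consumed by "%s"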
//
// Typedef
//
typedef std::map<std::string, std::string> headers_t;
struct header_nocase_cmp : public std::binary_function<std::string, std::string, bool>{
bool operator()(const std::string &strleft, const std::string &strright) const
{
return (strcasecmp(strleft.c_str(), strright.c_str()) < 0);
}
};
typedef std::map<std::string, std::string, header_nocase_cmp> headers_t;
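// Illustrative sketch (assumption): with header_nocase_cmp, lookups no longer
// depend on the capitalization a server returned.
//   headers_t meta;
//   meta["ETag"] = "\"d41d8cd9...\"";               // value is an invented example
//   bool found = (meta.end() != meta.find("etag")); // true: keys compare case-insensitively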
//
// Global valiables
// Header "x-amz-meta-xattr" is for extended attributes.
// This header is a URL-encoded, JSON-formatted string.
// x-amz-meta-xattr:urlencode({"xattr-1":"base64(value-1)","xattr-2":"base64(value-2)","xattr-3":"base64(value-3)"})
//
extern bool debug;
extern bool foreground;
extern bool foreground2;
extern bool nomultipart;
extern bool pathrequeststyle;
extern std::string program_name;
extern std::string service_path;
extern std::string host;
extern std::string bucket;
extern std::string mount_prefix;
typedef struct xattr_value{
unsigned char* pvalue;
size_t length;
explicit xattr_value(unsigned char* pval = NULL, size_t len = 0) : pvalue(pval), length(len) {}
~xattr_value()
{
if(pvalue){
free(pvalue);
}
}
}XATTRVAL, *PXATTRVAL;
typedef std::map<std::string, PXATTRVAL> xattrs_t;
//
// Global variables
//
extern bool foreground;
extern bool nomultipart;
extern bool pathrequeststyle;
extern bool complement_stat;
extern std::string program_name;
extern std::string service_path;
extern std::string host;
extern std::string bucket;
extern std::string mount_prefix;
extern std::string endpoint;
extern std::string cipher_suites;
extern s3fs_log_level debug_level;
extern const char* s3fs_log_nest[S3FS_LOG_NEST_MAX];
#endif // S3FS_COMMON_H_


@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -18,49 +18,20 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include "s3fs_auth.h"
#include "string_util.h"
using namespace std;
//-------------------------------------------------------------------
// Utility Function
//-------------------------------------------------------------------
char* s3fs_base64(unsigned char* input, size_t length)
{
static const char* base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
char* result;
if(!input || 0 >= length){
return NULL;
}
if(NULL == (result = (char*)malloc((((length / 3) + 1) * 4 + 1) * sizeof(char)))){
return NULL; // ENOMEM
}
unsigned char parts[4];
size_t rpos;
size_t wpos;
for(rpos = 0, wpos = 0; rpos < length; rpos += 3){
parts[0] = (input[rpos] & 0xfc) >> 2;
parts[1] = ((input[rpos] & 0x03) << 4) | ((((rpos + 1) < length ? input[rpos + 1] : 0x00) & 0xf0) >> 4);
parts[2] = (rpos + 1) < length ? (((input[rpos + 1] & 0x0f) << 2) | ((((rpos + 2) < length ? input[rpos + 2] : 0x00) & 0xc0) >> 6)) : 0x40;
parts[3] = (rpos + 2) < length ? (input[rpos + 2] & 0x3f) : 0x40;
result[wpos++] = base[parts[0]];
result[wpos++] = base[parts[1]];
result[wpos++] = base[parts[2]];
result[wpos++] = base[parts[3]];
}
result[wpos] = '\0';
return result;
}
string s3fs_get_content_md5(int fd)
{
unsigned char* md5hex;
@ -84,22 +55,37 @@ string s3fs_get_content_md5(int fd)
string s3fs_md5sum(int fd, off_t start, ssize_t size)
{
size_t digestlen = get_md5_digest_length();
char md5[2 * digestlen + 1];
char hexbuf[3];
unsigned char* md5hex;
if(NULL == (md5hex = s3fs_md5hexsum(fd, start, size))){
return string("");
}
memset(md5, 0, 2 * digestlen + 1);
for(size_t pos = 0; pos < digestlen; pos++){
snprintf(hexbuf, 3, "%02x", md5hex[pos]);
strncat(md5, hexbuf, 2);
}
std::string md5 = s3fs_hex(md5hex, digestlen);
free(md5hex);
return string(md5);
return md5;
}
string s3fs_sha256sum(int fd, off_t start, ssize_t size)
{
size_t digestlen = get_sha256_digest_length();
char sha256[2 * digestlen + 1];
char hexbuf[3];
unsigned char* sha256hex;
if(NULL == (sha256hex = s3fs_sha256hexsum(fd, start, size))){
return string("");
}
memset(sha256, 0, 2 * digestlen + 1);
for(size_t pos = 0; pos < digestlen; pos++){
snprintf(hexbuf, 3, "%02x", sha256hex[pos]);
strncat(sha256, hexbuf, 2);
}
free(sha256hex);
return string(sha256);
}
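// The md5sum path above delegates hex formatting to s3fs_hex() from
// string_util. Its definition is not part of this diff; the sketch below is an
// assumption about its shape, mirroring the snprintf loop it replaces.
//
// std::string s3fs_hex(const unsigned char* input, size_t length)
// {
//     std::string hex;
//     char buf[3];
//     for(size_t pos = 0; pos < length; ++pos){
//         snprintf(buf, sizeof(buf), "%02x", input[pos]);  // two lowercase hex digits
//         hex += buf;
//     }
//     return hex;
// }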
/*

File diff suppressed because it is too large.


@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -17,9 +17,17 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_CURL_H_
#define S3FS_CURL_H_
#include <cassert>
//----------------------------------------------
// Symbols
//----------------------------------------------
#define MIN_MULTIPART_SIZE 5242880 // 5MB
//----------------------------------------------
// class BodyData
//----------------------------------------------
@ -65,7 +73,7 @@ struct filepart
{
bool uploaded; // does finish uploading
std::string etag; // expected etag value
int fd; // base file(temporary full file) discriptor
int fd; // base file(temporary full file) descriptor
off_t startpos; // seek fd point for uploading
ssize_t size; // uploading size
etaglist_t* etaglist; // use only parallel upload
@ -115,6 +123,35 @@ typedef std::map<CURL*, progress_t> curlprogress_t;
class S3fsMultiCurl;
//----------------------------------------------
// class CurlHandlerPool
//----------------------------------------------
class CurlHandlerPool
{
public:
explicit CurlHandlerPool(int maxHandlers)
: mMaxHandlers(maxHandlers)
, mHandlers(NULL)
, mIndex(-1)
{
assert(maxHandlers > 0);
}
bool Init();
bool Destroy();
CURL* GetHandler();
void ReturnHandler(CURL* h);
private:
int mMaxHandlers;
pthread_mutex_t mLock;
CURL** mHandlers;
int mIndex;
};
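// Hypothetical usage sketch (pool size is an invented value): handles are
// recycled instead of calling curl_easy_init()/cleanup() per request.
//   CurlHandlerPool pool(32);
//   if(pool.Init()){
//       CURL* hCurl = pool.GetHandler();
//       // ... perform a request on hCurl ...
//       pool.ReturnHandler(hCurl);   // hand the handle back for reuse
//       pool.Destroy();
//   }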
//----------------------------------------------
// class S3fsCurl
//----------------------------------------------
@ -122,6 +159,21 @@ typedef std::map<std::string, std::string> iamcredmap_t;
typedef std::map<std::string, std::string> sseckeymap_t;
typedef std::list<sseckeymap_t> sseckeylist_t;
// storage class(rrs)
enum storage_class_t {
STANDARD,
STANDARD_IA,
REDUCED_REDUNDANCY
};
// sse type
enum sse_type_t {
SSE_DISABLE = 0, // not use server side encrypting
SSE_S3, // server side encrypting by S3 key
SSE_C, // server side encrypting by custom key
SSE_KMS // server side encrypting by kms id
};
// share
#define SHARE_MUTEX_DNS 0
#define SHARE_MUTEX_SSL_SESSION 1
@ -149,14 +201,18 @@ class S3fsCurl
REQTYPE_COPYMULTIPOST,
REQTYPE_MULTILIST,
REQTYPE_IAMCRED,
REQTYPE_ABORTMULTIUPLOAD
REQTYPE_ABORTMULTIUPLOAD,
REQTYPE_IAMROLE
};
// class variables
static pthread_mutex_t curl_handles_lock;
static pthread_mutex_t curl_share_lock[SHARE_MUTEX_MAX];
static bool is_initglobal_done;
static CurlHandlerPool* sCurlPool;
static int sCurlPoolSize;
static CURLSH* hCurlShare;
static bool is_cert_check;
static bool is_dns_cache;
static bool is_ssl_session_cache;
static long connect_timeout;
@ -164,9 +220,10 @@ class S3fsCurl
static int retries;
static bool is_public_bucket;
static std::string default_acl; // TODO: to enum
static bool is_use_rrs;
static storage_class_t storage_class;
static sseckeylist_t sseckeys;
static bool is_use_sse;
static std::string ssekmsid;
static sse_type_t ssetype;
static bool is_content_md5;
static bool is_verbose;
static std::string AWSAccessKeyId;
@ -181,6 +238,8 @@ class S3fsCurl
static mimes_t mimeTypes;
static int max_parallel_cnt;
static off_t multipart_size;
static bool is_sigv4;
static bool is_ua; // User-Agent
// variables
CURL* hCurl;
@ -204,12 +263,13 @@ class S3fsCurl
int b_postdata_remaining; // backup for retrying
off_t b_partdata_startpos; // backup for retrying
ssize_t b_partdata_size; // backup for retrying
bool b_ssekey_pos; // backup for retrying
std::string b_ssekey_md5; // backup for retrying
int b_ssekey_pos; // backup for retrying
std::string b_ssevalue; // backup for retrying
sse_type_t b_ssetype; // backup for retrying
public:
// constructor/destructor
S3fsCurl(bool ahbe = false);
explicit S3fsCurl(bool ahbe = false);
~S3fsCurl();
private:
@ -238,21 +298,28 @@ class S3fsCurl
static bool ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval);
static bool SetIAMCredentials(const char* response);
static bool ParseIAMRoleFromMetaDataResponse(const char* response, std::string& rolename);
static bool SetIAMRoleFromMetaData(const char* response);
static bool LoadEnvSseCKeys(void);
static bool LoadEnvSseKmsid(void);
static bool PushbackSseKeys(std::string& onekey);
static bool AddUserAgent(CURL* hCurl);
static int CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
// methods
bool ResetHandle(void);
bool RemakeHandle(void);
bool ClearInternalData(void);
std::string CalcSignature(std::string method, std::string strMD5, std::string content_type, std::string date, std::string resource);
void insertV4Headers(const std::string &op, const std::string &path, const std::string &query_string, const std::string &payload_hash);
std::string CalcSignatureV2(const std::string& method, const std::string& strMD5, const std::string& content_type, const std::string& date, const std::string& resource);
std::string CalcSignature(const std::string& method, const std::string& canonical_uri, const std::string& query_string, const std::string& strdate, const std::string& payload_hash, const std::string& date8601);
bool GetUploadId(std::string& upload_id);
int GetIAMCredentials(void);
int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy);
int CompleteMultipartPostRequest(const char* tpath, std::string& upload_id, etaglist_t& parts);
int UploadMultipartPostSetup(const char* tpath, int part_num, std::string& upload_id);
int UploadMultipartPostRequest(const char* tpath, int part_num, std::string& upload_id);
int UploadMultipartPostSetup(const char* tpath, int part_num, const std::string& upload_id);
int CopyMultipartPostRequest(const char* from, const char* to, int part_num, std::string& upload_id, headers_t& meta);
bool UploadMultipartPostComplete();
public:
// class methods
@ -262,8 +329,9 @@ class S3fsCurl
static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size);
static bool CheckIAMCredentialUpdate(void);
// class methods(valiables)
static std::string LookupMimeType(std::string name);
// class methods(variables)
static std::string LookupMimeType(const std::string& name);
static bool SetCheckCertificate(bool isCertCheck);
static bool SetDnsCache(bool isCache);
static bool SetSslSessionCache(bool isCache);
static long SetConnectTimeout(long timeout);
@ -273,16 +341,23 @@ class S3fsCurl
static bool SetPublicBucket(bool flag);
static bool IsPublicBucket(void) { return S3fsCurl::is_public_bucket; }
static std::string SetDefaultAcl(const char* acl);
static bool SetUseRrs(bool flag);
static bool GetUseRrs(void) { return S3fsCurl::is_use_rrs; }
static bool SetSseKeys(const char* filepath);
static bool LoadEnvSseKeys(void);
static storage_class_t SetStorageClass(storage_class_t storage_class);
static storage_class_t GetStorageClass() { return S3fsCurl::storage_class; }
static bool LoadEnvSse(void) { return (S3fsCurl::LoadEnvSseCKeys() && S3fsCurl::LoadEnvSseKmsid()); }
static sse_type_t SetSseType(sse_type_t type);
static sse_type_t GetSseType(void) { return S3fsCurl::ssetype; }
static bool IsSseDisable(void) { return (SSE_DISABLE == S3fsCurl::ssetype); }
static bool IsSseS3Type(void) { return (SSE_S3 == S3fsCurl::ssetype); }
static bool IsSseCType(void) { return (SSE_C == S3fsCurl::ssetype); }
static bool IsSseKmsType(void) { return (SSE_KMS == S3fsCurl::ssetype); }
static bool FinalCheckSse(void);
static bool SetSseCKeys(const char* filepath);
static bool SetSseKmsid(const char* kmsid);
static bool IsSetSseKmsId(void) { return !S3fsCurl::ssekmsid.empty(); }
static const char* GetSseKmsId(void) { return S3fsCurl::ssekmsid.c_str(); }
static bool GetSseKey(std::string& md5, std::string& ssekey);
static bool GetSseKeyMd5(int pos, std::string& md5);
static int GetSseKeyCount(void);
static bool IsSseCustomMode(void);
static bool SetUseSse(bool flag);
static bool GetUseSse(void) { return S3fsCurl::is_use_sse; }
static bool SetContentMd5(bool flag);
static bool SetVerbose(bool flag);
static bool GetVerbose(void) { return S3fsCurl::is_verbose; }
@ -298,12 +373,17 @@ class S3fsCurl
static const char* GetIAMRole(void) { return S3fsCurl::IAM_role.c_str(); }
static bool SetMultipartSize(off_t size);
static off_t GetMultipartSize(void) { return S3fsCurl::multipart_size; }
static bool SetSignatureV4(bool isset) { bool bresult = S3fsCurl::is_sigv4; S3fsCurl::is_sigv4 = isset; return bresult; }
static bool IsSignatureV4(void) { return S3fsCurl::is_sigv4; }
static bool SetUserAgentFlag(bool isset) { bool bresult = S3fsCurl::is_ua; S3fsCurl::is_ua = isset; return bresult; }
static bool IsUserAgentFlag(void) { return S3fsCurl::is_ua; }
// methods
bool CreateCurlHandle(bool force = false);
bool DestroyCurlHandle(void);
bool AddSseKeyRequestHead(std::string& md5, bool is_copy);
bool LoadIAMRoleFromMetaData(void);
bool AddSseRequestHead(sse_type_t ssetype, std::string& ssevalue, bool is_only_c, bool is_copy);
bool GetResponseCode(long& responseCode);
int RequestPerform(void);
int DeleteRequest(const char* tpath);
@ -314,17 +394,21 @@ class S3fsCurl
int HeadRequest(const char* tpath, headers_t& meta);
int PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy);
int PutRequest(const char* tpath, headers_t& meta, int fd);
int PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size, std::string& ssekeymd5);
int PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size, sse_type_t ssetype, std::string& ssevalue);
int GetObjectRequest(const char* tpath, int fd, off_t start = -1, ssize_t size = -1);
int CheckBucket(void);
int ListBucketRequest(const char* tpath, const char* query);
int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy);
int CompleteMultipartPostRequest(const char* tpath, std::string& upload_id, etaglist_t& parts);
int UploadMultipartPostRequest(const char* tpath, int part_num, const std::string& upload_id);
int MultipartListRequest(std::string& body);
int AbortMultipartUpload(const char* tpath, std::string& upload_id);
int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy);
int MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool is_copy);
int MultipartUploadRequest(const std::string& upload_id, const char* tpath, int fd, off_t offset, size_t size, etaglist_t& list);
int MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size);
// methods(valiables)
// methods(variables)
CURL* GetCurlHandle(void) const { return hCurl; }
std::string GetPath(void) const { return path; }
std::string GetBasePath(void) const { return base_path; }
@ -351,14 +435,13 @@ class S3fsCurl
//
typedef std::map<CURL*, S3fsCurl*> s3fscurlmap_t;
typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl); // callback for succeed multi request
typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failuer and retrying
typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying
class S3fsMultiCurl
{
private:
static int max_multireq;
CURLM* hMulti;
s3fscurlmap_t cMap_all; // all of curl requests
s3fscurlmap_t cMap_req; // curl requests are sent
@ -370,6 +453,8 @@ class S3fsMultiCurl
int MultiPerform(void);
int MultiRead(void);
static void* RequestPerformWrapper(void* arg);
public:
S3fsMultiCurl();
~S3fsMultiCurl();
@ -384,36 +469,6 @@ class S3fsMultiCurl
int Request(void);
};
//----------------------------------------------
// class AdditionalHeader
//----------------------------------------------
typedef std::list<int> charcnt_list_t;
typedef std::map<std::string, std::string> headerpair_t;
typedef std::map<std::string, headerpair_t> addheader_t;
class AdditionalHeader
{
private:
static AdditionalHeader singleton;
bool is_enable;
charcnt_list_t charcntlist;
addheader_t addheader;
public:
// Reference singleton
static AdditionalHeader* get(void) { return &singleton; }
AdditionalHeader();
~AdditionalHeader();
bool Load(const char* file);
void Unload(void);
bool AddHeader(headers_t& meta, const char* path) const;
struct curl_slist* AddHeader(struct curl_slist* list, const char* path) const;
bool Dump(void) const;
};
//----------------------------------------------
// Utility Functions
//----------------------------------------------
@ -421,7 +476,12 @@ std::string GetContentMD5(int fd);
unsigned char* md5hexsum(int fd, off_t start, ssize_t size);
std::string md5sum(int fd, off_t start, ssize_t size);
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data);
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value);
std::string get_sorted_header_keys(const struct curl_slist* list);
std::string get_canonical_headers(const struct curl_slist* list, bool only_amz = false);
bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url);
std::string prepare_url(const char* url);
bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& ssevalue); // implement in s3fs.cpp
#endif // S3FS_CURL_H_

File diff suppressed because it is too large.


@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -20,6 +20,9 @@
#ifndef FD_CACHE_H_
#define FD_CACHE_H_
#include <sys/statvfs.h>
#include "curl.h"
//------------------------------------------------
// CacheFileStat
//------------------------------------------------
@ -34,8 +37,10 @@ class CacheFileStat
public:
static bool DeleteCacheFileStat(const char* path);
static bool CheckCacheFileStatTopDir(void);
static bool DeleteCacheFileStatDirectory(void);
CacheFileStat(const char* tpath = NULL);
explicit CacheFileStat(const char* tpath = NULL);
~CacheFileStat();
bool Open(void);
@ -52,40 +57,49 @@ struct fdpage
{
off_t offset;
size_t bytes;
bool init;
bool loaded;
fdpage(off_t start = 0, size_t size = 0, bool is_init = false)
: offset(start), bytes(size), init(is_init) {}
fdpage(off_t start = 0, size_t size = 0, bool is_loaded = false)
: offset(start), bytes(size), loaded(is_loaded) {}
off_t next(void) const { return (offset + bytes); }
off_t end(void) const { return (0 < bytes ? offset + bytes - 1 : 0); }
};
typedef std::list<struct fdpage*> fdpage_list_t;
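// Illustrative sketch (not part of this commit): a page covering bytes 0..4095.
//   fdpage page(0, 4096, true);   // offset 0, 4096 bytes, already loaded
//   off_t after = page.next();    // 4096: first offset past this page
//   off_t last  = page.end();     // 4095: last offset inside this page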
class FdEntity;
//
// Management of loading area/modifying
//
class PageList
{
friend class FdEntity; // only one method access directly pages.
private:
fdpage_list_t pages;
private:
void Clear(void);
bool Compress(void);
bool Parse(off_t new_pos);
public:
static void FreeList(fdpage_list_t& list);
PageList(off_t size = 0, bool is_init = false);
explicit PageList(size_t size = 0, bool is_loaded = false);
~PageList();
off_t Size(void) const;
int Resize(off_t size, bool is_init);
int Init(off_t size, bool is_init);
bool IsInit(off_t start, off_t size);
bool SetInit(off_t start, off_t size, bool is_init = true);
bool FindUninitPage(off_t start, off_t& resstart, size_t& ressize);
int GetUninitPages(fdpage_list_t& uninit_list, off_t start = 0);
bool Init(size_t size, bool is_loaded);
size_t Size(void) const;
bool Resize(size_t size, bool is_loaded);
bool IsPageLoaded(off_t start = 0, size_t size = 0) const; // size=0 means checking to the end of the list
bool SetPageLoadedStatus(off_t start, size_t size, bool is_loaded = true, bool is_compress = true);
bool FindUnloadedPage(off_t start, off_t& resstart, size_t& ressize) const;
size_t GetTotalUnloadedPageSize(off_t start = 0, size_t size = 0) const; // size=0 means checking to the end of the list
int GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, size_t size = 0) const; // size=0 means checking to the end of the list
bool Serialize(CacheFileStat& file, bool is_output);
void Dump(void);
};
@ -99,40 +113,67 @@ class FdEntity
pthread_mutex_t fdent_lock;
bool is_lock_init;
PageList pagelist;
int refcnt; // reference count
std::string path; // object path
std::string cachepath; // local cache file path
int fd; // file discriptor(tmp file or cache file)
FILE* file; // file pointer(tmp file or cache file)
bool is_modify; // if file is changed, this flag is true
int refcnt; // reference count
std::string path; // object path
std::string cachepath; // local cache file path
// (if this is empty, does not load/save pagelist.)
std::string mirrorpath; // mirror file path to local cache file path
int fd; // file descriptor(tmp file or cache file)
FILE* pfile; // file pointer(tmp file or cache file)
bool is_modify; // if file is changed, this flag is true
headers_t orgmeta; // original headers at opening
size_t size_orgmeta; // original file size in original headers
std::string upload_id; // for no cached multipart uploading when no disk space
etaglist_t etaglist; // for no cached multipart uploading when no disk space
off_t mp_start; // start position for no cached multipart(write method only)
size_t mp_size; // size for no cached multipart(write method only)
private:
static int FillFile(int fd, unsigned char byte, size_t size, off_t start);
void Clear(void);
int Dup(void);
bool SetAllStatus(bool is_enable);
int OpenMirrorFile(void);
bool SetAllStatus(bool is_loaded); // [NOTE] not locking
//bool SetAllStatusLoaded(void) { return SetAllStatus(true); }
bool SetAllStatusUnloaded(void) { return SetAllStatus(false); }
public:
FdEntity(const char* tpath = NULL, const char* cpath = NULL);
explicit FdEntity(const char* tpath = NULL, const char* cpath = NULL);
~FdEntity();
void Close(void);
bool IsOpen(void) const { return (-1 != fd); }
int Open(off_t size = -1, time_t time = -1);
const char* GetPath(void) const { return path.c_str(); }
int GetFd(void) const { return fd; }
int SetMtime(time_t time);
bool GetSize(off_t& size);
bool GetMtime(time_t& time);
bool GetStats(struct stat& st);
int Open(headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1, bool no_fd_lock_wait = false);
bool OpenAndLoadAll(headers_t* pmeta = NULL, size_t* size = NULL, bool force_load = false);
int Dup(bool no_fd_lock_wait = false);
const char* GetPath(void) const { return path.c_str(); }
void SetPath(const std::string &newpath) { path = newpath; }
int GetFd(void) const { return fd; }
bool GetStats(struct stat& st);
int SetMtime(time_t time);
bool UpdateMtime(void);
bool GetSize(size_t& size);
bool SetMode(mode_t mode);
bool SetUId(uid_t uid);
bool SetGId(gid_t gid);
bool SetContentType(const char* path);
int Load(off_t start = 0, size_t size = 0); // size=0 means loading to end
int NoCacheLoadAndPost(off_t start = 0, size_t size = 0); // size=0 means loading to end
int NoCachePreMultipartPost(void);
int NoCacheMultipartPost(int tgfd, off_t start, size_t size);
int NoCacheCompleteMultipartPost(void);
int RowFlush(const char* tpath, bool force_sync = false);
int Flush(bool force_sync = false) { return RowFlush(NULL, force_sync); }
bool SetAllEnable(void) { return SetAllStatus(true); }
bool SetAllDisable(void) { return SetAllStatus(false); }
bool LoadFull(off_t* size = NULL, bool force_load = false);
int Load(off_t start, off_t size);
int RowFlush(const char* tpath, headers_t& meta, bool force_sync = false);
int Flush(headers_t& meta, bool force_sync = false) { return RowFlush(NULL, meta, force_sync); }
ssize_t Read(char* bytes, off_t start, size_t size, bool force_load = false);
ssize_t Write(const char* bytes, off_t start, size_t size);
void CleanupCache();
};
typedef std::map<std::string, class FdEntity*> fdent_map_t; // key=path, value=FdEntity*
@ -144,11 +185,17 @@ class FdManager
private:
static FdManager singleton;
static pthread_mutex_t fd_manager_lock;
static pthread_mutex_t cache_cleanup_lock;
static bool is_lock_init;
static std::string cache_dir;
static size_t page_size;
static bool check_cache_dir_exist;
static size_t free_disk_space; // limit free disk space
fdent_map_t fent;
fdent_map_t fent;
private:
static fsblkcnt_t GetFreeDiskSpace(const char* path);
void CleanupCacheDirInternal(const std::string &path = "");
public:
FdManager();
@ -162,14 +209,24 @@ class FdManager
static bool SetCacheDir(const char* dir);
static bool IsCacheDir(void) { return (0 < FdManager::cache_dir.size()); }
static const char* GetCacheDir(void) { return FdManager::cache_dir.c_str(); }
static size_t SetPageSize(size_t size);
static size_t GetPageSize(void) { return FdManager::page_size; }
static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true);
static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true, bool is_mirror_path = false);
static bool CheckCacheTopDir(void);
static bool MakeRandomTempPath(const char* path, std::string& tmppath);
static bool SetCheckCacheDirExist(bool is_check);
static bool CheckCacheDirExist(void);
FdEntity* GetFdEntity(const char* path);
FdEntity* Open(const char* path, off_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true);
FdEntity* ExistOpen(const char* path) { return Open(path, -1, -1, false, false); }
static size_t GetEnsureFreeDiskSpace(void) { return FdManager::free_disk_space; }
static size_t SetEnsureFreeDiskSpace(size_t size);
static size_t InitEnsureFreeDiskSpace(void) { return SetEnsureFreeDiskSpace(0); }
static bool IsSafeDiskSpace(const char* path, size_t size);
FdEntity* GetFdEntity(const char* path, int existfd = -1);
FdEntity* Open(const char* path, headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true, bool no_fd_lock_wait = false);
FdEntity* ExistOpen(const char* path, int existfd = -1, bool ignore_existfd = false);
void Rename(const std::string &from, const std::string &to);
bool Close(FdEntity* ent);
bool ChangeEntityToTempPath(FdEntity* ent, const char* path);
void CleanupCacheDir();
};
#endif // FD_CACHE_H_


@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -103,7 +103,7 @@ bool s3fs_destroy_crypt_mutex(void)
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
if(!key || 0 >= keylen || !data || 0 >= datalen || !digest || !digestlen){
if(!key || !data || !digest || !digestlen){
return false;
}
@ -120,15 +120,34 @@ bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t
return true;
}
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
if(!key || !data || !digest || !digestlen){
return false;
}
if(NULL == (*digest = (unsigned char*)malloc(SHA256_DIGEST_SIZE))){
return false;
}
struct hmac_sha256_ctx ctx_hmac;
hmac_sha256_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
hmac_sha256_update(&ctx_hmac, datalen, reinterpret_cast<const uint8_t*>(data));
hmac_sha256_digest(&ctx_hmac, SHA256_DIGEST_SIZE, reinterpret_cast<uint8_t*>(*digest));
*digestlen = SHA256_DIGEST_SIZE;
return true;
}
#else // USE_GNUTLS_NETTLE
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
if(!key || 0 >= keylen || !data || 0 >= datalen || !digest || !digestlen){
if(!key || !data || !digest || !digestlen){
return false;
}
if(0 >= (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){
return false;
}
if(NULL == (*digest = (unsigned char*)malloc(*digestlen + 1))){
@ -142,6 +161,26 @@ bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t
return true;
}
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
if(!key || !data || !digest || !digestlen){
return false;
}
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA256))){
return false;
}
if(NULL == (*digest = (unsigned char*)malloc(*digestlen + 1))){
return false;
}
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA256, key, keylen, data, datalen, *digest)){
free(*digest);
*digest = NULL;
return false;
}
return true;
}
#endif // USE_GNUTLS_NETTLE
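Whichever SSL backend is compiled in, the new s3fs_HMAC256() keeps the same caller contract as s3fs_HMAC(): the digest buffer is allocated with malloc() inside the helper and must be freed by the caller. A minimal caller sketch (an illustration, assuming the declarations in s3fs_auth.h and a linked auth backend):
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "s3fs_auth.h"
int main()
{
    const char* key  = "secret";
    const char* data = "payload";
    unsigned char* digest  = NULL;
    unsigned int digestlen = 0;
    if(!s3fs_HMAC256(key, strlen(key), reinterpret_cast<const unsigned char*>(data), strlen(data), &digest, &digestlen)){
        return 1;
    }
    for(unsigned int i = 0; i < digestlen; ++i){
        printf("%02x", digest[i]);   // hex dump of the 32-byte MAC
    }
    printf("\n");
    free(digest);                    // helpers allocate with malloc()
    return 0;
}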
//-------------------------------------------------------------------
@ -178,7 +217,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
break;
}else if(-1 == bytes){
// error
DPRNNN("file read error(%d)", errno);
S3FS_PRN_ERR("file read error(%d)", errno);
return NULL;
}
md5_update(&ctx_md5, bytes, buf);
@ -222,7 +261,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
memset(buf, 0, 512);
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){
DPRNN("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
return NULL;
}
@ -234,7 +273,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
break;
}else if(-1 == bytes){
// error
DPRNNN("file read error(%d)", errno);
S3FS_PRN_ERR("file read error(%d)", errno);
return NULL;
}
gcry_md_write(ctx_md5, buf, bytes);
@ -256,6 +295,154 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
#endif // USE_GNUTLS_NETTLE
//-------------------------------------------------------------------
// Utility Function for SHA256
//-------------------------------------------------------------------
#define SHA256_DIGEST_LENGTH 32
size_t get_sha256_digest_length(void)
{
return SHA256_DIGEST_LENGTH;
}
#ifdef USE_GNUTLS_NETTLE
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
{
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
return false;
}
struct sha256_ctx ctx_sha256;
sha256_init(&ctx_sha256);
sha256_update(&ctx_sha256, datalen, data);
sha256_digest(&ctx_sha256, *digestlen, *digest);
return true;
}
unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
{
struct sha256_ctx ctx_sha256;
unsigned char buf[512];
ssize_t bytes;
unsigned char* result;
// seek to top of file.
if(-1 == lseek(fd, start, SEEK_SET)){
return NULL;
}
memset(buf, 0, 512);
sha256_init(&ctx_sha256);
for(ssize_t total = 0; total < size; total += bytes){
bytes = 512 < (size - total) ? 512 : (size - total);
bytes = read(fd, buf, bytes);
if(0 == bytes){
// end of file
break;
}else if(-1 == bytes){
// error
S3FS_PRN_ERR("file read error(%d)", errno);
return NULL;
}
sha256_update(&ctx_sha256, bytes, buf);
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
return NULL;
}
sha256_digest(&ctx_sha256, get_sha256_digest_length(), result);
if(-1 == lseek(fd, start, SEEK_SET)){
free(result);
return NULL;
}
return result;
}
#else // USE_GNUTLS_NETTLE
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
{
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
return false;
}
gcry_md_hd_t ctx_sha256;
gcry_error_t err;
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
free(*digest);
return false;
}
gcry_md_write(ctx_sha256, data, datalen);
memcpy(*digest, gcry_md_read(ctx_sha256, 0), *digestlen);
gcry_md_close(ctx_sha256);
return true;
}
unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
{
gcry_md_hd_t ctx_sha256;
gcry_error_t err;
char buf[512];
ssize_t bytes;
unsigned char* result;
if(-1 == size){
struct stat st;
if(-1 == fstat(fd, &st)){
return NULL;
}
size = static_cast<ssize_t>(st.st_size);
}
// seek to top of file.
if(-1 == lseek(fd, start, SEEK_SET)){
return NULL;
}
memset(buf, 0, 512);
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
return NULL;
}
for(ssize_t total = 0; total < size; total += bytes){
bytes = 512 < (size - total) ? 512 : (size - total);
bytes = read(fd, buf, bytes);
if(0 == bytes){
// end of file
break;
}else if(-1 == bytes){
// error
S3FS_PRN_ERR("file read error(%d)", errno);
return NULL;
}
gcry_md_write(ctx_sha256, buf, bytes);
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
return NULL;
}
memcpy(result, gcry_md_read(ctx_sha256, 0), get_sha256_digest_length());
gcry_md_close(ctx_sha256);
if(-1 == lseek(fd, start, SEEK_SET)){
free(result);
return NULL;
}
return result;
}
#endif // USE_GNUTLS_NETTLE
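Despite the "hexsum" name, s3fs_sha256hexsum() returns the raw digest bytes (the hex conversion lives in the s3fs_sha256sum() wrapper in common_auth.cpp); it streams the descriptor in 512-byte chunks and rewinds it afterwards. A hedged usage sketch (the file path is arbitrary, and the size is passed explicitly since the Nettle variant above does not resolve size=-1 itself):
#include <cstdio>
#include <cstdlib>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>
#include "s3fs_auth.h"
int main()
{
    int fd = open("/etc/hosts", O_RDONLY);
    struct stat st;
    if(-1 == fd || -1 == fstat(fd, &st)){
        return 1;
    }
    unsigned char* digest = s3fs_sha256hexsum(fd, 0, static_cast<ssize_t>(st.st_size));
    close(fd);
    if(!digest){
        return 1;
    }
    for(size_t i = 0; i < get_sha256_digest_length(); ++i){
        printf("%02x", digest[i]);   // raw 32-byte digest, printed as hex
    }
    printf("\n");
    free(digest);
    return 0;
}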
/*
* Local variables:
* tab-width: 4

View File

@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -83,9 +83,9 @@ bool s3fs_destroy_crypt_mutex(void)
//-------------------------------------------------------------------
// Utility Function for HMAC
//-------------------------------------------------------------------
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256)
{
if(!key || 0 >= keylen || !data || 0 >= datalen || !digest || !digestlen){
if(!key || !data || !digest || !digestlen){
return false;
}
@ -94,17 +94,17 @@ bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t
PK11Context* Context;
SECStatus SecStatus;
unsigned char tmpdigest[64];
SECItem KeySecItem = {siBuffer, reinterpret_cast<unsigned char*>(const_cast<void*>(key)), keylen};
SECItem KeySecItem = {siBuffer, reinterpret_cast<unsigned char*>(const_cast<void*>(key)), static_cast<unsigned int>(keylen)};
SECItem NullSecItem = {siBuffer, NULL, 0};
if(NULL == (Slot = PK11_GetInternalKeySlot())){
return false;
}
if(NULL == (pKey = PK11_ImportSymKey(Slot, CKM_SHA_1_HMAC, PK11_OriginUnwrap, CKA_SIGN, &KeySecItem, NULL))){
if(NULL == (pKey = PK11_ImportSymKey(Slot, (is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), PK11_OriginUnwrap, CKA_SIGN, &KeySecItem, NULL))){
PK11_FreeSlot(Slot);
return false;
}
if(NULL == (Context = PK11_CreateContextBySymKey(CKM_SHA_1_HMAC, CKA_SIGN, pKey, &NullSecItem))){
if(NULL == (Context = PK11_CreateContextBySymKey((is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), CKA_SIGN, pKey, &NullSecItem))){
PK11_FreeSymKey(pKey);
PK11_FreeSlot(Slot);
return false;
@ -132,6 +132,16 @@ bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t
return true;
}
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false);
}
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true);
}
//-------------------------------------------------------------------
// Utility Function for MD5
//-------------------------------------------------------------------
@ -172,7 +182,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
break;
}else if(-1 == bytes){
// error
DPRNNN("file read error(%d)", errno);
S3FS_PRN_ERR("file read error(%d)", errno);
return NULL;
}
PK11_DigestOp(md5ctx, buf, bytes);
@ -193,6 +203,87 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
return result;
}
//-------------------------------------------------------------------
// Utility Function for SHA256
//-------------------------------------------------------------------
size_t get_sha256_digest_length(void)
{
return SHA256_LENGTH;
}
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
{
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
return false;
}
PK11Context* sha256ctx;
unsigned int sha256outlen;
sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);
PK11_DigestOp(sha256ctx, data, datalen);
PK11_DigestFinal(sha256ctx, *digest, &sha256outlen, *digestlen);
PK11_DestroyContext(sha256ctx, PR_TRUE);
*digestlen = sha256outlen;
return true;
}
unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
{
PK11Context* sha256ctx;
unsigned char buf[512];
ssize_t bytes;
unsigned char* result;
unsigned int sha256outlen;
if(-1 == size){
struct stat st;
if(-1 == fstat(fd, &st)){
return NULL;
}
size = static_cast<ssize_t>(st.st_size);
}
// seek to top of file.
if(-1 == lseek(fd, start, SEEK_SET)){
return NULL;
}
memset(buf, 0, 512);
sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);
for(ssize_t total = 0; total < size; total += bytes){
bytes = 512 < (size - total) ? 512 : (size - total);
bytes = read(fd, buf, bytes);
if(0 == bytes){
// end of file
break;
}else if(-1 == bytes){
// error
S3FS_PRN_ERR("file read error(%d)", errno);
PK11_DestroyContext(sha256ctx, PR_TRUE);
return NULL;
}
PK11_DigestOp(sha256ctx, buf, bytes);
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
PK11_DestroyContext(sha256ctx, PR_TRUE);
return NULL;
}
PK11_DigestFinal(sha256ctx, result, &sha256outlen, get_sha256_digest_length());
PK11_DestroyContext(sha256ctx, PR_TRUE);
if(-1 == lseek(fd, start, SEEK_SET)){
free(result);
return NULL;
}
return result;
}
/*
* Local variables:
* tab-width: 4

View File

@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -32,6 +32,7 @@
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/md5.h>
#include <openssl/sha.h>
#include <openssl/crypto.h>
#include <openssl/err.h>
#include <string>
@ -94,7 +95,9 @@ static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line)
static unsigned long s3fs_crypt_get_threadid(void)
{
return static_cast<unsigned long>(pthread_self());
// For FreeBSD etc., some systems' pthread_t is a structure pointer,
// so we use a C-style cast (not C++) instead of an ifdef.
return (unsigned long)(pthread_self());
}
static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line)
@ -102,7 +105,7 @@ static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int l
struct CRYPTO_dynlock_value* dyndata;
if(NULL == (dyndata = static_cast<struct CRYPTO_dynlock_value*>(malloc(sizeof(struct CRYPTO_dynlock_value))))){
DPRNCRIT("Could not allocate memory for CRYPTO_dynlock_value");
S3FS_PRN_CRIT("Could not allocate memory for CRYPTO_dynlock_value");
return NULL;
}
pthread_mutex_init(&(dyndata->dyn_mutex), NULL);
@ -131,14 +134,14 @@ static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, c
bool s3fs_init_crypt_mutex(void)
{
if(s3fs_crypt_mutex){
FPRNNN("s3fs_crypt_mutex is not NULL, destory it.");
S3FS_PRN_DBG("s3fs_crypt_mutex is not NULL, destroy it.");
if(!s3fs_destroy_crypt_mutex()){
DPRN("Failed to s3fs_crypt_mutex");
S3FS_PRN_ERR("Failed to s3fs_crypt_mutex");
return false;
}
}
if(NULL == (s3fs_crypt_mutex = static_cast<pthread_mutex_t*>(malloc(CRYPTO_num_locks() * sizeof(pthread_mutex_t))))){
DPRNCRIT("Could not allocate memory for s3fs_crypt_mutex");
S3FS_PRN_CRIT("Could not allocate memory for s3fs_crypt_mutex");
return false;
}
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
@ -180,20 +183,34 @@ bool s3fs_destroy_crypt_mutex(void)
//-------------------------------------------------------------------
// Utility Function for HMAC
//-------------------------------------------------------------------
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256)
{
if(!key || 0 >= keylen || !data || 0 >= datalen || !digest || !digestlen){
if(!key || !data || !digest || !digestlen){
return false;
}
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
if(NULL == ((*digest) = (unsigned char*)malloc(*digestlen))){
return false;
}
HMAC(EVP_sha1(), key, keylen, data, datalen, *digest, digestlen);
if(is_sha256){
HMAC(EVP_sha256(), key, keylen, data, datalen, *digest, digestlen);
}else{
HMAC(EVP_sha1(), key, keylen, data, datalen, *digest, digestlen);
}
return true;
}
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false);
}
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true);
}
//-------------------------------------------------------------------
// Utility Function for MD5
//-------------------------------------------------------------------
@ -233,7 +250,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
break;
}else if(-1 == bytes){
// error
DPRNNN("file read error(%d)", errno);
S3FS_PRN_ERR("file read error(%d)", errno);
return NULL;
}
MD5_Update(&md5ctx, buf, bytes);
@ -253,6 +270,85 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
return result;
}
//-------------------------------------------------------------------
// Utility Function for SHA256
//-------------------------------------------------------------------
size_t get_sha256_digest_length(void)
{
return SHA256_DIGEST_LENGTH;
}
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
{
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
return false;
}
const EVP_MD* md = EVP_get_digestbyname("sha256");
EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
EVP_DigestInit_ex(mdctx, md, NULL);
EVP_DigestUpdate(mdctx, data, datalen);
EVP_DigestFinal_ex(mdctx, *digest, digestlen);
EVP_MD_CTX_destroy(mdctx);
return true;
}
unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
{
const EVP_MD* md = EVP_get_digestbyname("sha256");
EVP_MD_CTX* sha256ctx;
char buf[512];
ssize_t bytes;
unsigned char* result;
if(-1 == size){
struct stat st;
if(-1 == fstat(fd, &st)){
return NULL;
}
size = static_cast<ssize_t>(st.st_size);
}
// seek to top of file.
if(-1 == lseek(fd, start, SEEK_SET)){
return NULL;
}
sha256ctx = EVP_MD_CTX_create();
EVP_DigestInit_ex(sha256ctx, md, NULL);
memset(buf, 0, 512);
for(ssize_t total = 0; total < size; total += bytes){
bytes = 512 < (size - total) ? 512 : (size - total);
bytes = read(fd, buf, bytes);
if(0 == bytes){
// end of file
break;
}else if(-1 == bytes){
// error
S3FS_PRN_ERR("file read error(%d)", errno);
EVP_MD_CTX_destroy(sha256ctx);
return NULL;
}
EVP_DigestUpdate(sha256ctx, buf, bytes);
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
EVP_MD_CTX_destroy(sha256ctx);
return NULL;
}
EVP_DigestFinal_ex(sha256ctx, result, NULL);
EVP_MD_CTX_destroy(sha256ctx);
if(-1 == lseek(fd, start, SEEK_SET)){
free(result);
return NULL;
}
return result;
}
/*
* Local variables:
* tab-width: 4

File diff suppressed because it is too large

View File

@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -84,8 +84,6 @@
#endif // HAVE_MALLOC_TRIM
char* get_object_sseckey_md5(const char* path);
#endif // S3FS_S3_H_
/*

View File

@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -20,15 +20,18 @@
#ifndef S3FS_AUTH_H_
#define S3FS_AUTH_H_
#include <string>
#include <sys/types.h>
//-------------------------------------------------------------------
// Utility functions for Authentication
//-------------------------------------------------------------------
//
// in common_auth.cpp
//
char* s3fs_base64(unsigned char* input, size_t length);
std::string s3fs_get_content_md5(int fd);
std::string s3fs_md5sum(int fd, off_t start, ssize_t size);
std::string s3fs_sha256sum(int fd, off_t start, ssize_t size);
//
// in xxxxxx_auth.cpp
@ -39,8 +42,12 @@ bool s3fs_destroy_global_ssl(void);
bool s3fs_init_crypt_mutex(void);
bool s3fs_destroy_crypt_mutex(void);
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen);
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen);
size_t get_md5_digest_length(void);
unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size);
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen);
size_t get_sha256_digest_length(void);
unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size);
#endif // S3FS_AUTH_H_

View File

@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2013 Takeshi Nakatani <ggtakec.com>
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -46,7 +46,7 @@
using namespace std;
//-------------------------------------------------------------------
// Global valiables
// Global variables
//-------------------------------------------------------------------
std::string mount_prefix = "";
@ -110,7 +110,7 @@ bool S3ObjList::insert(const char* name, const char* etag, bool is_dir)
if(objects.end() != (iter = objects.find(chkname))){
// found "dir/" object --> not add new object.
// and add normalization
return insert_nomalized(orgname.c_str(), chkname.c_str(), true);
return insert_normalized(orgname.c_str(), chkname.c_str(), true);
}
}
@ -135,10 +135,10 @@ bool S3ObjList::insert(const char* name, const char* etag, bool is_dir)
}
// add normalization
return insert_nomalized(orgname.c_str(), newname.c_str(), is_dir);
return insert_normalized(orgname.c_str(), newname.c_str(), is_dir);
}
bool S3ObjList::insert_nomalized(const char* name, const char* normalized, bool is_dir)
bool S3ObjList::insert_normalized(const char* name, const char* normalized, bool is_dir)
{
if(!name || '\0' == name[0] || !normalized || '\0' == normalized[0]){
return false;
@ -233,7 +233,7 @@ bool S3ObjList::GetLastName(std::string& lastname) const
{
bool result = false;
lastname = "";
for(s3obj_t::const_iterator iter = objects.begin(); iter != objects.end(); iter++){
for(s3obj_t::const_iterator iter = objects.begin(); iter != objects.end(); ++iter){
if((*iter).second.orgname.length()){
if(0 > strcmp(lastname.c_str(), (*iter).second.orgname.c_str())){
lastname = (*iter).second.orgname;
@ -253,7 +253,7 @@ bool S3ObjList::GetNameList(s3obj_list_t& list, bool OnlyNormalized, bool CutSla
{
s3obj_t::const_iterator iter;
for(iter = objects.begin(); objects.end() != iter; iter++){
for(iter = objects.begin(); objects.end() != iter; ++iter){
if(OnlyNormalized && 0 != (*iter).second.normalname.length()){
continue;
}
@ -275,7 +275,7 @@ bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash)
s3obj_h_t::iterator hiter;
s3obj_list_t::const_iterator liter;
for(liter = list.begin(); list.end() != liter; liter++){
for(liter = list.begin(); list.end() != liter; ++liter){
string strtmp = (*liter);
if(1 < strtmp.length() && '/' == strtmp[strtmp.length() - 1]){
strtmp = strtmp.substr(0, strtmp.length() - 1);
@ -425,51 +425,25 @@ void free_mvnodes(MVNODE *head)
//-------------------------------------------------------------------
// Class AutoLock
//-------------------------------------------------------------------
AutoLock::AutoLock(pthread_mutex_t* pmutex) : auto_mutex(pmutex), is_locked(false)
AutoLock::AutoLock(pthread_mutex_t* pmutex, bool no_wait) : auto_mutex(pmutex)
{
Lock();
if (no_wait) {
is_lock_acquired = pthread_mutex_trylock(auto_mutex) == 0;
} else {
is_lock_acquired = pthread_mutex_lock(auto_mutex) == 0;
}
}
bool AutoLock::isLockAcquired() const
{
return is_lock_acquired;
}
AutoLock::~AutoLock()
{
Unlock();
}
bool AutoLock::Lock(void)
{
if(!auto_mutex){
return false;
}
if(is_locked){
// already locked
return true;
}
try{
pthread_mutex_lock(auto_mutex);
is_locked = true;
}catch(exception& e){
is_locked = false;
return false;
}
return true;
}
bool AutoLock::Unlock(void)
{
if(!auto_mutex){
return false;
}
if(!is_locked){
// already unlocked
return true;
}
try{
if (is_lock_acquired) {
pthread_mutex_unlock(auto_mutex);
is_locked = false;
}catch(exception& e){
return false;
}
return true;
}
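The reworked AutoLock acquires the mutex in its constructor, optionally with pthread_mutex_trylock() when no_wait is set, and releases it in the destructor only if acquisition succeeded. A sketch of the new no-wait pattern (the mutex and function here are hypothetical):
#include <pthread.h>
#include "s3fs_util.h"
static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;
bool try_refresh_cache(void)
{
    AutoLock lock(&cache_mutex, true);   // no_wait: trylock instead of lock
    if(!lock.isLockAcquired()){
        return false;                    // contended; caller may retry later
    }
    // ... critical section ...
    return true;                         // mutex released by ~AutoLock()
}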
//-------------------------------------------------------------------
@ -478,27 +452,28 @@ bool AutoLock::Unlock(void)
// get user name from uid
string get_username(uid_t uid)
{
static size_t maxlen = 0; // set onece
int result;
static size_t maxlen = 0; // set once
char* pbuf;
struct passwd pwinfo;
struct passwd* ppwinfo = NULL;
// make buffer
if(0 == maxlen){
if(0 > (maxlen = (size_t)sysconf(_SC_GETPW_R_SIZE_MAX))){
DPRNNN("could not get max pw length.");
long res = sysconf(_SC_GETPW_R_SIZE_MAX);
if(0 > res){
S3FS_PRN_WARN("could not get max pw length.");
maxlen = 0;
return string("");
}
maxlen = res;
}
if(NULL == (pbuf = (char*)malloc(sizeof(char) * maxlen))){
DPRNCRIT("failed to allocate memory.");
S3FS_PRN_CRIT("failed to allocate memory.");
return string("");
}
// get group infomation
if(0 != (result = getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo))){
DPRNNN("could not get pw infomation.");
// get group information
if(0 != getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo)){
S3FS_PRN_WARN("could not get pw information.");
free(pbuf);
return string("");
}
@ -512,9 +487,9 @@ string get_username(uid_t uid)
return name;
}
int is_uid_inculde_group(uid_t uid, gid_t gid)
int is_uid_include_group(uid_t uid, gid_t gid)
{
static size_t maxlen = 0; // set onece
static size_t maxlen = 0; // set once
int result;
char* pbuf;
struct group ginfo;
@ -522,19 +497,21 @@ int is_uid_inculde_group(uid_t uid, gid_t gid)
// make buffer
if(0 == maxlen){
if(0 > (maxlen = (size_t)sysconf(_SC_GETGR_R_SIZE_MAX))){
DPRNNN("could not get max name length.");
long res = sysconf(_SC_GETGR_R_SIZE_MAX);
if(0 > res){
S3FS_PRN_ERR("could not get max name length.");
maxlen = 0;
return -ERANGE;
}
maxlen = res;
}
if(NULL == (pbuf = (char*)malloc(sizeof(char) * maxlen))){
DPRNCRIT("failed to allocate memory.");
S3FS_PRN_CRIT("failed to allocate memory.");
return -ENOMEM;
}
// get group infomation
// get group information
if(0 != (result = getgrgid_r(gid, &ginfo, pbuf, maxlen, &pginfo))){
DPRNNN("could not get group infomation.");
S3FS_PRN_ERR("could not get group information.");
free(pbuf);
return -result;
}
@ -565,6 +542,14 @@ int is_uid_inculde_group(uid_t uid, gid_t gid)
//-------------------------------------------------------------------
// safe variant of dirname
// dirname clobbers path so let it operate on a tmp copy
string mydirname(const char* path)
{
if(!path || '\0' == path[0]){
return string("");
}
return mydirname(string(path));
}
string mydirname(string path)
{
return string(dirname((char*)path.c_str()));
@ -572,6 +557,14 @@ string mydirname(string path)
// safe variant of basename
// basename clobbers path so let it operate on a tmp copy
string mybasename(const char* path)
{
if(!path || '\0' == path[0]){
return string("");
}
return mybasename(string(path));
}
string mybasename(string path)
{
return string(basename((char*)path.c_str()));
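Both helpers copy their argument because libc dirname() and basename() may modify the buffer passed to them; the new const char* overloads additionally guard NULL and empty input. For illustration (a sketch assuming s3fs_util.h):
#include <cstdio>
#include "s3fs_util.h"
int main()
{
    printf("%s\n", mydirname("/bucket/dir/file").c_str());    // "/bucket/dir"
    printf("%s\n", mybasename("/bucket/dir/file").c_str());   // "file"
    printf("[%s]\n", mydirname("").c_str());                  // "[]": empty input is guarded
    return 0;
}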
@ -580,23 +573,102 @@ string mybasename(string path)
// mkdir --parents
int mkdirp(const string& path, mode_t mode)
{
string base;
string component;
string base;
string component;
stringstream ss(path);
while (getline(ss, component, '/')) {
base += "/" + component;
mkdir(base.c_str(), mode);
struct stat st;
if(0 == stat(base.c_str(), &st)){
if(!S_ISDIR(st.st_mode)){
return EPERM;
}
}else{
if(0 != mkdir(base.c_str(), mode)){
return errno;
}
}
}
return 0;
}
// get deepest existing directory path
string get_exist_directory_path(const string& path)
{
string existed("/"); // "/" always exists.
string base;
string component;
stringstream ss(path);
while (getline(ss, component, '/')) {
if(base != "/"){
base += "/";
}
base += component;
struct stat st;
if(0 == stat(base.c_str(), &st) && S_ISDIR(st.st_mode)){
existed = base;
}else{
break;
}
}
return existed;
}
bool check_exist_dir_permission(const char* dirpath)
{
if(!dirpath || '\0' == dirpath[0]){
return false;
}
// exists
struct stat st;
if(0 != stat(dirpath, &st)){
if(ENOENT == errno){
// dir does not exist
return true;
}
if(EACCES == errno){
// could not access directory
return false;
}
// some other error occurred
return false;
}
// check type
if(!S_ISDIR(st.st_mode)){
// path is not directory
return false;
}
// check permission
uid_t myuid = geteuid();
if(myuid == st.st_uid){
if(S_IRWXU != (st.st_mode & S_IRWXU)){
return false;
}
}else{
if(1 == is_uid_include_group(myuid, st.st_gid)){
if(S_IRWXG != (st.st_mode & S_IRWXG)){
return false;
}
}else{
if(S_IRWXO != (st.st_mode & S_IRWXO)){
return false;
}
}
}
return true;
}
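Taken together, the three helpers above let a caller validate a cache path before creating it, similar to what the cache checks in fd_cache need: find the deepest existing component, verify the current euid has rwx on it, then create the rest. A composition sketch (the function name is hypothetical):
#include <cerrno>
#include <string>
#include "s3fs_util.h"
// Returns 0 on success, an errno-style value otherwise (a sketch).
int prepare_cache_dir(const std::string& dir)
{
    // Walk down to the deepest path component that already exists...
    std::string existed = get_exist_directory_path(dir);
    // ...and require rwx on it for the current euid before going further.
    if(!check_exist_dir_permission(existed.c_str())){
        return EPERM;
    }
    return mkdirp(dir, 0700);   // creates only the missing components
}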
bool delete_files_in_dir(const char* dir, bool is_remove_own)
{
DIR* dp;
struct dirent* dent;
if(NULL == (dp = opendir(dir))){
DPRNINFO("could not open dir(%s) - errno(%d)", dir, errno);
S3FS_PRN_ERR("could not open dir(%s) - errno(%d)", dir, errno);
return false;
}
@ -609,20 +681,20 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own)
fullpath += dent->d_name;
struct stat st;
if(0 != lstat(fullpath.c_str(), &st)){
DPRN("could not get stats of file(%s) - errno(%d)", fullpath.c_str(), errno);
S3FS_PRN_ERR("could not get stats of file(%s) - errno(%d)", fullpath.c_str(), errno);
closedir(dp);
return false;
}
if(S_ISDIR(st.st_mode)){
// dir -> Reentrant
if(!delete_files_in_dir(fullpath.c_str(), true)){
DPRNINFO("could not remove sub dir(%s) - errno(%d)", fullpath.c_str(), errno);
S3FS_PRN_ERR("could not remove sub dir(%s) - errno(%d)", fullpath.c_str(), errno);
closedir(dp);
return false;
}
}else{
if(0 != unlink(fullpath.c_str())){
DPRN("could not remove file(%s) - errno(%d)", fullpath.c_str(), errno);
S3FS_PRN_ERR("could not remove file(%s) - errno(%d)", fullpath.c_str(), errno);
closedir(dp);
return false;
}
@ -631,7 +703,7 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own)
closedir(dp);
if(is_remove_own && 0 != rmdir(dir)){
DPRN("could not remove dir(%s) - errno(%d)", dir, errno);
S3FS_PRN_ERR("could not remove dir(%s) - errno(%d)", dir, errno);
return false;
}
return true;
@ -664,8 +736,8 @@ off_t get_size(const char *s)
off_t get_size(headers_t& meta)
{
headers_t::const_iterator iter;
if(meta.end() == (iter = meta.find("Content-Length"))){
headers_t::const_iterator iter = meta.find("Content-Length");
if(meta.end() == iter){
return 0;
}
return get_size((*iter).second.c_str());
@ -701,13 +773,30 @@ mode_t get_mode(headers_t& meta, const char* path, bool checkdir, bool forcedir)
}else{
if(meta.end() != (iter = meta.find("Content-Type"))){
string strConType = (*iter).second;
// Leave just the mime type, removing any optional parameters (e.g. charset)
string::size_type pos = strConType.find(";");
if(string::npos != pos){
strConType = strConType.substr(0, pos);
}
if(strConType == "application/x-directory"){
mode |= S_IFDIR;
}else if(path && 0 < strlen(path) && '/' == path[strlen(path) - 1]){
if(strConType == "binary/octet-stream" || strConType == "application/octet-stream"){
mode |= S_IFDIR;
}else{
mode |= S_IFREG;
if(complement_stat){
// When complementing a lacking stat mode: if the object name ends with a
// '/' character, the content type is text/plain and the object's size is
// 0 or 1, treat it as a directory.
off_t size = get_size(meta);
if(strConType == "text/plain" && (0 == size || 1 == size)){
mode |= S_IFDIR;
}else{
mode |= S_IFREG;
}
}else{
mode |= S_IFREG;
}
}
}else{
mode |= S_IFREG;
@ -717,6 +806,11 @@ mode_t get_mode(headers_t& meta, const char* path, bool checkdir, bool forcedir)
}
}
}
// When complementing a lacking stat mode: if no permission bits are set,
// give the object a minimal read permission (plus execute for directories).
if(complement_stat && 0 == (mode & (S_IRWXU | S_IRWXG | S_IRWXO))){
mode |= (S_IRUSR | (0 == (mode & S_IFDIR) ? 0 : S_IXUSR));
}
}else{
if(!checkdir){
// cut dir/reg flag.
@ -789,8 +883,8 @@ time_t get_lastmodified(const char* s)
time_t get_lastmodified(headers_t& meta)
{
headers_t::const_iterator iter;
if(meta.end() == (iter = meta.find("Last-Modified"))){
headers_t::const_iterator iter = meta.find("Last-Modified");
if(meta.end() == iter){
return 0;
}
return get_lastmodified((*iter).second.c_str());
@ -847,6 +941,17 @@ void show_help (void)
"\n"
"Mount an Amazon S3 bucket as a file system.\n"
"\n"
"Usage:\n"
" mounting\n"
" s3fs bucket[:/path] mountpoint [options]\n"
" s3fs mountpoint [options(must specify bucket= option)]\n"
"\n"
" umounting\n"
" umount mountpoint\n"
"\n"
" utility mode (remove interrupted multipart uploading objects)\n"
" s3fs -u bucket\n"
"\n"
" General forms for s3fs and FUSE/mount options:\n"
" -o opt[,opt...]\n"
" -o opt [-o opt] ...\n"
@ -857,10 +962,15 @@ void show_help (void)
"\n"
" <option_name>=<option_value>\n"
"\n"
" bucket\n"
" - if the bucket name (and path) is not specified on the command\n"
" line, this option must be given after -o to name the bucket.\n"
"\n"
" default_acl (default=\"private\")\n"
" - the default canned acl to apply to all written s3 objects\n"
" see http://aws.amazon.com/documentation/s3/ for the \n"
" full list of canned acls\n"
" - the default canned acl to apply to all written s3 objects,\n"
" e.g., private, public-read. An empty string means the header\n"
" is not sent. See http://aws.amazon.com/documentation/s3/ for\n"
" the full list of canned acls.\n"
"\n"
" retries (default=\"2\")\n"
" - number of times to retry a failed s3 transaction\n"
@ -868,34 +978,69 @@ void show_help (void)
" use_cache (default=\"\" which means disabled)\n"
" - local folder to use for local file cache\n"
"\n"
" check_cache_dir_exist (default is disable)\n"
" - if use_cache is set, check if the cache directory exists.\n"
" if this option is not specified, the cache directory will be\n"
" created at runtime when it does not exist.\n"
"\n"
" del_cache (delete local file cache)\n"
" - delete local file cache when s3fs starts and exits.\n"
"\n"
" use_rrs (default is disable)\n"
" - this option makes Amazon's Reduced Redundancy Storage enable.\n"
" storage_class (default=\"standard\")\n"
" - store object with specified storage class. Possible values:\n"
" standard, standard_ia, and reduced_redundancy.\n"
"\n"
" use_sse (default is disable)\n"
" - use Amazon's Server-Side Encryption or Server-Side Encryption\n"
" with Customer-Provided Encryption Keys.\n"
" this option can not be specified with use_rrs. specifying only \n"
" \"use_sse\" or \"use_sse=1\" enables Server-Side Encryption.\n"
" (use_sse=1 for old version)\n"
" specifying this option with file path which has some SSE-C\n"
" secret key enables Server-Side Encryption with Customer-Provided\n"
" Encryption Keys.(use_sse=file)\n"
" the file must be 600 permission. the file can have some lines,\n"
" each line is one SSE-C key. the first line in file is used as\n"
" Customer-Provided Encryption Keys for uploading and changing\n"
" headers etc.\n"
" if there are some keys after first line, those are used\n"
" downloading object which are encripted by not first key.\n"
" so that, you can keep all SSE-C keys in file, that is SSE-C\n"
" key history.\n"
" if AWSSSECKEYS environment is set, you can set SSE-C key instead\n"
" - Specify one of three types of Amazon's Server-Side Encryption:\n"
" SSE-S3, SSE-C or SSE-KMS. SSE-S3 uses Amazon S3-managed\n"
" encryption keys, SSE-C uses customer-provided encryption keys,\n"
" and SSE-KMS uses a master key which you manage in AWS KMS.\n"
" Specifying \"use_sse\" or \"use_sse=1\" enables the SSE-S3 type\n"
" (use_sse=1 is the old style parameter).\n"
" For SSE-C, you can specify \"use_sse=custom\",\n"
" \"use_sse=custom:<custom key file path>\" or\n"
" \"use_sse=<custom key file path>\" (only <custom key file path>\n"
" specified is the old style parameter). You can use \"c\" as\n"
" short for \"custom\".\n"
" The custom key file must have 600 permission. The file can\n"
" have several lines; each line is one SSE-C key. The first line\n"
" in the file is used as the Customer-Provided Encryption Key for\n"
" uploading and changing headers etc. If there are keys after the\n"
" first line, they are used for downloading objects that were\n"
" encrypted with a key other than the first. This way you can\n"
" keep all SSE-C keys in the file as a key history.\n"
" If you specify \"custom\" (\"c\") without a file path, you need\n"
" to set the custom key with the load_sse_c option or the\n"
" AWSSSECKEYS environment variable. (AWSSSECKEYS holds SSE-C keys\n"
" separated by \":\".) This option decides the SSE type, so if you\n"
" do not want to encrypt objects at upload time but need to\n"
" decrypt encrypted objects at download time, you can use the\n"
" load_sse_c option instead of this option.\n"
" For SSE-KMS, specify \"use_sse=kmsid\" or\n"
" \"use_sse=kmsid:<kms id>\". You can use \"k\" as short for\n"
" \"kmsid\". If you want to specify the SSE-KMS type with your\n"
" <kms id> in AWS KMS, set it after \"kmsid:\" (or \"k:\"). If you\n"
" specify only \"kmsid\" (\"k\"), you need to set the AWSSSEKMSID\n"
" environment variable whose value is the <kms id>. Be careful:\n"
" you cannot use a KMS id that is not in the same region as the\n"
" EC2 instance.\n"
"\n"
" load_sse_c - specify SSE-C keys\n"
" Specify the customer-provided encryption key file path used for\n"
" decrypting at download time.\n"
" If you use a customer-provided encryption key at upload time,\n"
" specify it with \"use_sse=custom\". The file may have many\n"
" lines; each line is one custom key, so you can keep all SSE-C\n"
" keys in the file as a key history. The AWSSSECKEYS environment\n"
" variable has the same format as this file's contents.\n"
"\n"
" public_bucket (default=\"\" which means disabled)\n"
" - anonymously mount a public bucket when set to 1\n"
" - anonymously mount a public bucket when set to 1; this ignores\n"
" the $HOME/.passwd-s3fs and /etc/passwd-s3fs files.\n"
" S3 does not allow the copy object API for anonymous users, so\n"
" s3fs sets the nocopyapi option automatically when\n"
" public_bucket=1 is specified.\n"
"\n"
" passwd_file (default=\"\")\n"
" - specify which s3fs password file to use\n"
@ -905,26 +1050,28 @@ void show_help (void)
" file is the additional HTTP header by file(object) extension.\n"
" The configuration file format is below:\n"
" -----------\n"
" line = [file suffix] HTTP-header [HTTP-values]\n"
" line = [file suffix or regex] HTTP-header [HTTP-values]\n"
" file suffix = file(object) suffix, if this field is empty,\n"
" it means \"*\"(all object).\n"
" it means \"reg:(.*)\".(=all object).\n"
" regex = regular expression to match the file(object) path.\n"
" this type starts with \"reg:\" prefix.\n"
" HTTP-header = additional HTTP header name\n"
" HTTP-values = additional HTTP header value\n"
" -----------\n"
" Sample:\n"
" -----------\n"
" .gz Content-Encoding gzip\n"
" .Z Content-Encoding compress\n"
" X-S3FS-MYHTTPHEAD myvalue\n"
" .gz Content-Encoding gzip\n"
" .Z Content-Encoding compress\n"
" reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2\n"
" -----------\n"
" A sample configuration file is provided in the \"test\" directory.\n"
" If you use this option to set the \"Content-Encoding\" HTTP\n"
" header, please take care to follow RFC 2616.\n"
"\n"
" connect_timeout (default=\"10\" seconds)\n"
" connect_timeout (default=\"300\" seconds)\n"
" - time to wait for connection before giving up\n"
"\n"
" readwrite_timeout (default=\"30\" seconds)\n"
" readwrite_timeout (default=\"60\" seconds)\n"
" - time to wait between read/write activity before giving up\n"
"\n"
" max_stat_cache_size (default=\"1000\" entries (about 4MB))\n"
@ -932,6 +1079,7 @@ void show_help (void)
"\n"
" stat_cache_expire (default is no expire)\n"
" - specify expire time(seconds) for entries in the stat cache.\n"
" This expire time is counted from when the entry was cached.\n"
"\n"
" enable_noobj_cache (default is disable)\n"
" - enable cache entries for the object which does not exist.\n"
@ -943,6 +1091,10 @@ void show_help (void)
" You can specify this option for performance; s3fs memorizes\n"
" in the stat cache that the object (file or directory) does not exist.\n"
"\n"
" no_check_certificate\n"
" - server certificate won't be checked against the available \n"
" certificate authorities.\n"
"\n"
" nodnscache (disable dns cache)\n"
" - s3fs always uses the DNS cache; this option disables it.\n"
"\n"
@ -961,28 +1113,70 @@ void show_help (void)
" at once. Set this value depending on your CPU and network\n"
" bandwidth.\n"
"\n"
" fd_page_size (default=\"52428800\"(50MB))\n"
" - number of internal management page size for each file discriptor.\n"
" For delayed reading and writing by s3fs, s3fs manages pages which \n"
" is separated from object. Each pages has a status that data is \n"
" already loaded(or not loaded yet).\n"
" This option should not be changed when you don't have a trouble \n"
" with performance.\n"
" multipart_size (default=\"10\")\n"
" - part size, in MB, for each multipart request.\n"
"\n"
" url (default=\"http://s3.amazonaws.com\")\n"
" - sets the url to use to access amazon s3\n"
" ensure_diskfree (default is the multipart_size value)\n"
" - sets the disk space, in MB, to keep free. s3fs creates\n"
" temporary files for downloading, uploading and caching. If the\n"
" free disk space is smaller than this value, s3fs avoids using\n"
" disk space where possible, at the cost of performance.\n"
"\n"
" singlepart_copy_limit (default=\"5120\")\n"
" - maximum size, in MB, of a single-part copy before trying \n"
" multipart copy.\n"
"\n"
" url (default=\"https://s3.amazonaws.com\")\n"
" - sets the url to use to access Amazon S3. If you want to use HTTP,\n"
" then you can set \"url=http://s3.amazonaws.com\".\n"
" If you start s3fs without specifying the url option, s3fs will\n"
" check the bucket using https://s3.amazonaws.com, and when the\n"
" bucket check fails, s3fs retries it using\n"
" http://s3.amazonaws.com. This behavior is retained for\n"
" backward compatibility. If you do not use https, please specify\n"
" the URL with the url option.\n"
"\n"
" endpoint (default=\"us-east-1\")\n"
" - sets the endpoint to use on signature version 4\n"
" If this option is not specified, s3fs uses \"us-east-1\" region as\n"
" the default. If s3fs cannot connect to the region specified by\n"
" this option, it will not run. But if you do not specify this\n"
" option and cannot connect with the default region, s3fs will\n"
" automatically retry against the correct region, which it learns\n"
" from the error response returned by the S3 server.\n"
"\n"
" sigv2 (default is signature version 4)\n"
" - sign AWS requests using Signature Version 2\n"
"\n"
" mp_umask (default is \"0000\")\n"
" - sets umask for the mount point directory.\n"
" If the allow_other option is not set, s3fs allows access to the\n"
" mount point only to the owner; otherwise it allows access to\n"
" all users by default. If you set allow_other together with this\n"
" option, you can control the mount point permissions with this\n"
" option like a umask.\n"
"\n"
" nomultipart (disable multipart uploads)\n"
"\n"
" enable_content_md5 (default is disable)\n"
" - verifying uploaded object without multipart by content-md5 header.\n"
" - ensure data integrity during writes with MD5 hash.\n"
"\n"
" iam_role (default is no role)\n"
" - set the IAM Role that will supply the credentials from the \n"
" instance meta-data.\n"
" iam_role (default is no IAM role)\n"
" - This option requires the IAM role name or \"auto\". If you specify\n"
" \"auto\", s3fs will automatically use the IAM role names that are set\n"
" to an instance. If you specify this option without any argument, it\n"
" is the same as that you have specified the \"auto\".\n"
"\n"
" noxmlns (disable registing xml name space)\n"
" disable registing xml name space for response of \n"
" use_xattr (default is not handling the extended attribute)\n"
" Enable handling of extended attributes (xattrs).\n"
" If you set this option, you can use extended attributes.\n"
" For example, encfs and ecryptfs need extended attribute support.\n"
" Notice: when s3fs handles extended attributes, the copy command\n"
" with preserve=mode does not work.\n"
"\n"
" noxmlns (disable registering xml name space)\n"
" disable registering xml name space for response of \n"
" ListBucketResult and ListVersionsResult etc. Default name \n"
" space is looked up from \"http://s3.amazonaws.com/doc/2006-03-01\".\n"
" This option should not be specified now, because s3fs looks up\n"
@ -1002,13 +1196,61 @@ void show_help (void)
" option does not use copy-api for all command(ex. chmod, chown,\n"
" touch, mv, etc), but this option does not use copy-api for\n"
" only rename command(ex. mv). If this option is specified with\n"
" nocopapi, the s3fs ignores it.\n"
" nocopyapi, then s3fs ignores it.\n"
"\n"
" use_path_request_style (use legacy API calling style)\n"
" Enble compatibility with S3-like APIs which do not support\n"
" Enable compatibility with S3-like APIs which do not support\n"
" the virtual-host request style, by using the older path request\n"
" style.\n"
"\n"
" noua (suppress User-Agent header)\n"
" Usually s3fs sends a User-Agent header in \"s3fs/<version> (commit\n"
" hash <hash>; <ssl library name>)\" format.\n"
" If this option is specified, s3fs suppresses the User-Agent\n"
" header.\n"
"\n"
" dbglevel (default=\"crit\")\n"
" Set the debug message level: crit (critical), err (error),\n"
" warn (warning) or info (information).\n"
" The default level is critical. If s3fs runs with the \"-d\"\n"
" option, the level is set to information. When s3fs catches the\n"
" SIGUSR2 signal, the level is bumped up.\n"
"\n"
" curldbg - output curl debug messages\n"
" Outputs debug messages from libcurl when this option is specified.\n"
"\n"
" cipher_suites - customize TLS cipher suite list\n"
" Customize the list of TLS cipher suites.\n"
" Expects a colon separated list of cipher suite names.\n"
" A list of available cipher suites, depending on your TLS engine,\n"
" can be found on the CURL library documentation:\n"
" https://curl.haxx.se/docs/ssl-ciphers.html\n"
"\n"
" complement_stat (complement lack of file/directory mode)\n"
" s3fs complements lack of information about file/directory mode\n"
" if a file or a directory object does not have x-amz-meta-mode\n"
" header. By default, s3fs does not complement stat information\n"
" for an object, so the object may not be listable or\n"
" modifiable.\n"
"\n"
" notsup_compat_dir (do not support compatibility directory types)\n"
" By default, s3fs supports objects of the directory type as much\n"
" as possible and recognizes them as directories.\n"
" Objects that can be recognized as directory objects are \"dir/\",\n"
" \"dir\", \"dir_$folder$\", and file objects that have no directory\n"
" object but whose key contains that directory path.\n"
" s3fs needs redundant communication to support all of these\n"
" directory types. The directory object that s3fs itself creates\n"
" is \"dir/\". By restricting s3fs to recognize only \"dir/\" as a\n"
" directory, communication traffic can be reduced; this option\n"
" applies that restriction.\n"
" However, if the bucket contains directory objects other than\n"
" \"dir/\", specifying this option is not recommended, since s3fs\n"
" may not be able to recognize such objects correctly.\n"
" Please use this option only when every directory in the bucket\n"
" is a \"dir/\" object.\n"
"\n"
"FUSE/mount Options:\n"
"\n"
" Most of the generic mount options described in 'man mount' are\n"
@ -1027,12 +1269,11 @@ void show_help (void)
" -d --debug Turn on DEBUG messages to syslog. Specifying -d\n"
" twice turns on FUSE debug messages to STDOUT.\n"
" -f FUSE foreground option - do not run as daemon.\n"
" -s FUSE singlethread option\n"
" -s FUSE singlethreaded option\n"
" disable multi-threaded operation\n"
"\n"
"\n"
"Report bugs to <s3fs-devel@googlegroups.com>\n"
"s3fs home page: <http://code.google.com/p/s3fs/>\n"
"s3fs home page: <https://github.com/s3fs-fuse/s3fs-fuse>\n"
);
return;
}
@ -1040,12 +1281,12 @@ void show_help (void)
void show_version(void)
{
printf(
"Amazon Simple Storage Service File System V%s with %s\n"
"Amazon Simple Storage Service File System V%s(commit:%s) with %s\n"
"Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>\n"
"License GPL2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>\n"
"This is free software: you are free to change and redistribute it.\n"
"There is NO WARRANTY, to the extent permitted by law.\n",
VERSION, s3fs_crypt_lib_name());
VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name());
return;
}

View File

@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -27,7 +27,7 @@
// Struct
//
struct s3obj_entry{
std::string normalname; // normalized name: if empty, object is nomalized name.
std::string normalname; // normalized name: if empty, object is normalized name.
std::string orgname; // original name: if empty, object is original name.
std::string etag;
bool is_dir;
@ -47,7 +47,7 @@ class S3ObjList
s3obj_t objects;
private:
bool insert_nomalized(const char* name, const char* normalized, bool is_dir);
bool insert_normalized(const char* name, const char* normalized, bool is_dir);
const s3obj_entry* GetS3Obj(const char* name) const;
s3obj_t::const_iterator begin(void) const {
@ -88,14 +88,12 @@ class AutoLock
{
private:
pthread_mutex_t* auto_mutex;
bool is_locked;
bool is_lock_acquired;
public:
AutoLock(pthread_mutex_t* pmutex = NULL);
explicit AutoLock(pthread_mutex_t* pmutex, bool no_wait = false);
bool isLockAcquired() const;
~AutoLock();
bool Lock(void);
bool Unlock(void);
};
//-------------------------------------------------------------------
@ -108,11 +106,15 @@ MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const cha
void free_mvnodes(MVNODE *head);
std::string get_username(uid_t uid);
int is_uid_inculde_group(uid_t uid, gid_t gid);
int is_uid_include_group(uid_t uid, gid_t gid);
std::string mydirname(const char* path);
std::string mydirname(std::string path);
std::string mybasename(const char* path);
std::string mybasename(std::string path);
int mkdirp(const std::string& path, mode_t mode);
std::string get_exist_directory_path(const std::string& path);
bool check_exist_dir_permission(const char* dirpath);
bool delete_files_in_dir(const char* dir, bool is_remove_own);
time_t get_mtime(const char *s);

View File

@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -17,7 +17,9 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
@ -84,13 +86,6 @@ string lower(string s)
return s;
}
string IntToStr(int n)
{
stringstream result;
result << n;
return result.str();
}
string trim_left(const string &s, const string &t /* = SPACES */)
{
string d(s);
@ -123,26 +118,98 @@ string urlEncode(const string &s)
{
string result;
for (unsigned i = 0; i < s.length(); ++i) {
if (s[i] == '/') { // Note- special case for fuse paths...
result += s[i];
} else if (isalnum(s[i])) {
result += s[i];
} else if (s[i] == '.' || s[i] == '-' || s[i] == '*' || s[i] == '_') {
result += s[i];
} else if (s[i] == ' ') {
result += '%';
result += '2';
result += '0';
char c = s[i];
if (c == '/' // Note- special case for fuse paths...
|| c == '.'
|| c == '-'
|| c == '_'
|| c == '~'
|| (c >= 'a' && c <= 'z')
|| (c >= 'A' && c <= 'Z')
|| (c >= '0' && c <= '9')) {
result += c;
} else {
result += "%";
result += hexAlphabet[static_cast<unsigned char>(s[i]) / 16];
result += hexAlphabet[static_cast<unsigned char>(s[i]) % 16];
result += hexAlphabet[static_cast<unsigned char>(c) / 16];
result += hexAlphabet[static_cast<unsigned char>(c) % 16];
}
}
return result;
}
/**
* urlEncode an S3 query/URL fragment, leaving '=', '&' and '%'
* unescaped (unlike urlEncode, which leaves '/' unescaped),
* otherwise regular urlEncode.
*/
string urlEncode2(const string &s)
{
string result;
for (unsigned i = 0; i < s.length(); ++i) {
char c = s[i];
if (c == '=' // Note- special case for fuse paths...
|| c == '&' // Note- special case for s3...
|| c == '%'
|| c == '.'
|| c == '-'
|| c == '_'
|| c == '~'
|| (c >= 'a' && c <= 'z')
|| (c >= 'A' && c <= 'Z')
|| (c >= '0' && c <= '9')) {
result += c;
} else {
result += "%";
result += hexAlphabet[static_cast<unsigned char>(c) / 16];
result += hexAlphabet[static_cast<unsigned char>(c) % 16];
}
}
return result;
}
string urlDecode(const string& s)
{
string result;
for(unsigned i = 0; i < s.length(); ++i){
if(s[i] != '%'){
result += s[i];
}else{
char ch = 0;
if(s.length() <= ++i){
break; // wrong format.
}
ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00;
if(s.length() <= ++i){
break; // wrong format.
}
ch *= 16;
ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00;
result += ch;
}
}
return result;
}
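The two encoders differ only in their reserved sets: urlEncode() leaves '/' intact so FUSE paths pass through unchanged, while urlEncode2() leaves '=', '&' and '%' intact for query strings but escapes '/'; urlDecode() converts %XX escapes back to bytes. A quick check (a sketch assuming string_util.h):
#include <cassert>
#include "string_util.h"
int main()
{
    assert(urlEncode("/dir/a b") == "/dir/a%20b");   // '/' preserved, ' ' escaped
    assert(urlEncode2("k=v&p") == "k=v&p");          // '=' and '&' preserved
    assert(urlEncode2("/x") == "%2Fx");              // but '/' is escaped here
    assert(urlDecode("%2Fa%20b") == "/a b");
    return 0;
}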
bool takeout_str_dquart(string& str)
{
size_t pos;
// '"' for start
if(string::npos != (pos = str.find_first_of("\""))){
str = str.substr(pos + 1);
// '"' for end
if(string::npos == (pos = str.find_last_of("\""))){
return false;
}
str = str.substr(0, pos);
if(string::npos != str.find_first_of("\"")){
return false;
}
}
return true;
}
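takeout_str_dquart() strips one pair of surrounding double quotes in place (useful for ETag values, which S3 returns quoted) and fails if a stray quote remains inside. A small sketch:
#include <cassert>
#include <string>
#include "string_util.h"
int main()
{
    std::string etag = "\"0123abcd\"";
    assert(takeout_str_dquart(etag));   // strips the surrounding quotes
    assert(etag == "0123abcd");
    return 0;
}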
//
// ex. target="http://......?keyword=value&..."
//
@ -169,48 +236,11 @@ bool get_keyword_value(string& target, const char* keyword, string& value)
return true;
}
string prepare_url(const char* url)
{
FPRNINFO("URL is %s", url);
string uri;
string host;
string path;
string url_str = str(url);
string token = str("/" + bucket);
int bucket_pos = url_str.find(token);
int bucket_length = token.size();
int uri_length = 7;
if(!strncasecmp(url_str.c_str(), "https://", 8)){
uri_length = 8;
}
uri = url_str.substr(0, uri_length);
if(!pathrequeststyle){
host = bucket + "." + url_str.substr(uri_length, bucket_pos - uri_length).c_str();
path = url_str.substr((bucket_pos + bucket_length));
}else{
host = url_str.substr(uri_length, bucket_pos - uri_length).c_str();
string part = url_str.substr((bucket_pos + bucket_length));
if('/' != part[0]){
part = "/" + part;
}
path = "/" + bucket + part;
}
url_str = uri + host + path;
FPRNINFO("URL changed is %s", url_str.c_str());
return str(url_str);
}
/**
* Returns the current date
* in a format suitable for an HTTP request header.
*/
string get_date()
string get_date_rfc850()
{
char buf[100];
time_t t = time(NULL);
@ -218,6 +248,125 @@ string get_date()
return buf;
}
void get_date_sigv3(string& date, string& date8601)
{
time_t tm = time(NULL);
date = get_date_string(tm);
date8601 = get_date_iso8601(tm);
}
string get_date_string(time_t tm)
{
char buf[100];
strftime(buf, sizeof(buf), "%Y%m%d", gmtime(&tm));
return buf;
}
string get_date_iso8601(time_t tm)
{
char buf[100];
strftime(buf, sizeof(buf), "%Y%m%dT%H%M%SZ", gmtime(&tm));
return buf;
}
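The two granularities presumably feed signature V4 signing, which needs both a YYYYMMDD date (credential scope) and a full ISO 8601 timestamp (x-amz-date header). A fixed-timestamp sketch:
#include <cstdio>
#include <ctime>
#include "string_util.h"
int main()
{
    time_t t = 1500000000;                         // 2017-07-14 02:40:00 UTC
    printf("%s\n", get_date_string(t).c_str());    // "20170714"
    printf("%s\n", get_date_iso8601(t).c_str());   // "20170714T024000Z"
    return 0;
}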
std::string s3fs_hex(const unsigned char* input, size_t length)
{
std::string hex;
for(size_t pos = 0; pos < length; ++pos){
char hexbuf[3];
snprintf(hexbuf, 3, "%02x", input[pos]);
hex += hexbuf;
}
return hex;
}
char* s3fs_base64(const unsigned char* input, size_t length)
{
static const char* base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
char* result;
if(!input || 0 >= length){
return NULL;
}
if(NULL == (result = (char*)malloc((((length / 3) + 1) * 4 + 1) * sizeof(char)))){
return NULL; // ENOMEM
}
unsigned char parts[4];
size_t rpos;
size_t wpos;
for(rpos = 0, wpos = 0; rpos < length; rpos += 3){
parts[0] = (input[rpos] & 0xfc) >> 2;
parts[1] = ((input[rpos] & 0x03) << 4) | ((((rpos + 1) < length ? input[rpos + 1] : 0x00) & 0xf0) >> 4);
parts[2] = (rpos + 1) < length ? (((input[rpos + 1] & 0x0f) << 2) | ((((rpos + 2) < length ? input[rpos + 2] : 0x00) & 0xc0) >> 6)) : 0x40;
parts[3] = (rpos + 2) < length ? (input[rpos + 2] & 0x3f) : 0x40;
result[wpos++] = base[parts[0]];
result[wpos++] = base[parts[1]];
result[wpos++] = base[parts[2]];
result[wpos++] = base[parts[3]];
}
result[wpos] = '\0';
return result;
}
inline unsigned char char_decode64(const char ch)
{
unsigned char by;
if('A' <= ch && ch <= 'Z'){ // A - Z
by = static_cast<unsigned char>(ch - 'A');
}else if('a' <= ch && ch <= 'z'){ // a - z
by = static_cast<unsigned char>(ch - 'a' + 26);
}else if('0' <= ch && ch <= '9'){ // 0 - 9
by = static_cast<unsigned char>(ch - '0' + 52);
}else if('+' == ch){ // +
by = 62;
}else if('/' == ch){ // /
by = 63;
}else if('=' == ch){ // =
by = 64;
}else{ // something wrong
by = UCHAR_MAX;
}
return by;
}
unsigned char* s3fs_decode64(const char* input, size_t* plength)
{
unsigned char* result;
if(!input || 0 == strlen(input) || !plength){
return NULL;
}
if(NULL == (result = (unsigned char*)malloc((strlen(input) + 1)))){
return NULL; // ENOMEM
}
unsigned char parts[4];
size_t input_len = strlen(input);
size_t rpos;
size_t wpos;
for(rpos = 0, wpos = 0; rpos < input_len; rpos += 4){
parts[0] = char_decode64(input[rpos]);
parts[1] = (rpos + 1) < input_len ? char_decode64(input[rpos + 1]) : 64;
parts[2] = (rpos + 2) < input_len ? char_decode64(input[rpos + 2]) : 64;
parts[3] = (rpos + 3) < input_len ? char_decode64(input[rpos + 3]) : 64;
result[wpos++] = ((parts[0] << 2) & 0xfc) | ((parts[1] >> 4) & 0x03);
if(64 == parts[2]){
break;
}
result[wpos++] = ((parts[1] << 4) & 0xf0) | ((parts[2] >> 2) & 0x0f);
if(64 == parts[3]){
break;
}
result[wpos++] = ((parts[2] << 6) & 0xc0) | (parts[3] & 0x3f);
}
result[wpos] = '\0';
*plength = wpos;
return result;
}
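Both base64 helpers hand back malloc()ed buffers: s3fs_base64() returns a NUL-terminated string, while s3fs_decode64() also NUL-terminates but reports the real length through plength, since decoded data may be binary. A round-trip sketch:
#include <cstdio>
#include <cstdlib>
#include "string_util.h"
int main()
{
    const unsigned char raw[] = {'a', 'b', 'c'};
    char* b64 = s3fs_base64(raw, sizeof(raw));          // "YWJj"
    size_t outlen = 0;
    unsigned char* back = s3fs_decode64(b64, &outlen);  // outlen == 3
    printf("%s -> %.*s\n", b64, static_cast<int>(outlen), reinterpret_cast<char*>(back));
    free(b64);    // both buffers are malloc()ed by the helpers
    free(back);
    return 0;
}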
/*
* Local variables:
* tab-width: 4

View File

@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -25,6 +25,7 @@
*/
#include <string.h>
#include <syslog.h>
#include <sys/types.h>
#include <string>
#include <sstream>
@ -44,12 +45,20 @@ std::string trim_left(const std::string &s, const std::string &t = SPACES);
std::string trim_right(const std::string &s, const std::string &t = SPACES);
std::string trim(const std::string &s, const std::string &t = SPACES);
std::string lower(std::string s);
std::string IntToStr(int);
std::string get_date();
std::string get_date_rfc850(void);
void get_date_sigv3(std::string& date, std::string& date8601);
std::string get_date_string(time_t tm);
std::string get_date_iso8601(time_t tm);
std::string urlEncode(const std::string &s);
std::string prepare_url(const char* url);
std::string urlEncode2(const std::string &s);
std::string urlDecode(const std::string& s);
bool takeout_str_dquart(std::string& str);
bool get_keyword_value(std::string& target, const char* keyword, std::string& value);
std::string s3fs_hex(const unsigned char* input, size_t length);
char* s3fs_base64(const unsigned char* input, size_t length);
unsigned char* s3fs_decode64(const char* input, size_t* plength);
#endif // S3FS_STRING_UTIL_H_
/*

src/test_string_util.cpp (new file, 83 lines)

@@ -0,0 +1,83 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2014 Andrew Gaul <andrew@gaul.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <limits>
#include <stdint.h>
#include <string>
#include "string_util.h"
#include "test_util.h"
void test_trim()
{
  ASSERT_EQUALS(std::string("1234"), trim(" 1234 "));
  ASSERT_EQUALS(std::string("1234"), trim("1234 "));
  ASSERT_EQUALS(std::string("1234"), trim(" 1234"));
  ASSERT_EQUALS(std::string("1234"), trim("1234"));

  ASSERT_EQUALS(std::string("1234 "), trim_left(" 1234 "));
  ASSERT_EQUALS(std::string("1234 "), trim_left("1234 "));
  ASSERT_EQUALS(std::string("1234"), trim_left(" 1234"));
  ASSERT_EQUALS(std::string("1234"), trim_left("1234"));

  ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234 "));
  ASSERT_EQUALS(std::string("1234"), trim_right("1234 "));
  ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234"));
  ASSERT_EQUALS(std::string("1234"), trim_right("1234"));

  ASSERT_EQUALS(std::string("0"), str(0));
  ASSERT_EQUALS(std::string("1"), str(1));
  ASSERT_EQUALS(std::string("-1"), str(-1));
  ASSERT_EQUALS(std::string("9223372036854775807"), str(std::numeric_limits<int64_t>::max()));
  ASSERT_EQUALS(std::string("-9223372036854775808"), str(std::numeric_limits<int64_t>::min()));
  ASSERT_EQUALS(std::string("0"), str(std::numeric_limits<uint64_t>::min()));
  ASSERT_EQUALS(std::string("18446744073709551615"), str(std::numeric_limits<uint64_t>::max()));
}

void test_base64()
{
  size_t len;

  ASSERT_STREQUALS(s3fs_base64(NULL, 0), NULL);
  ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64(NULL, &len)), NULL);
  ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>(""), 0), NULL);
  ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("", &len)), NULL);

  ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1"), 1), "MQ==");
  ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MQ==", &len)), "1");
  ASSERT_EQUALS(len, static_cast<size_t>(1));
  ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("12"), 2), "MTI=");
  ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTI=", &len)), "12");
  ASSERT_EQUALS(len, static_cast<size_t>(2));
  ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("123"), 3), "MTIz");
  ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTIz", &len)), "123");
  ASSERT_EQUALS(len, static_cast<size_t>(3));
  ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1234"), 4), "MTIzNA==");
  ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTIzNA==", &len)), "1234");
  ASSERT_EQUALS(len, static_cast<size_t>(4));

  // TODO: invalid input
}

int main(int argc, char *argv[])
{
  test_trim();
  test_base64();
  return 0;
}
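
A minimal sketch of building and running this unit test by hand (assumptions: string_util.cpp provides trim, str, and the base64 helpers, and no extra include paths are needed; the project's real build rules live in its Makefile.am, not shown in this hunk):

    g++ -o test_string_util src/test_string_util.cpp src/string_util.cpp
    ./test_string_util   # exits 0 on success, 1 on the first failed assertion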

src/test_util.h (new file, 47 lines)

@@ -0,0 +1,47 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2014 Andrew Gaul <andrew@gaul.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <cstdlib>
#include <cstring>   // strcmp
#include <iostream>

template <typename T> void assert_equals(const T &x, const T &y, const char *file, int line)
{
  if (x != y) {
    std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
    std::exit(1);
  }
}

void assert_strequals(const char *x, const char *y, const char *file, int line)
{
  if(x == NULL && y == NULL){
    return;
  // cppcheck-suppress nullPointerRedundantCheck
  } else if((x == NULL || y == NULL) || strcmp(x, y) != 0){
    std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
    std::exit(1);
  }
}

#define ASSERT_EQUALS(x, y) \
  assert_equals((x), (y), __FILE__, __LINE__)

#define ASSERT_STREQUALS(x, y) \
  assert_strequals((x), (y), __FILE__, __LINE__)


@@ -27,3 +27,4 @@ EXTRA_DIST = \
sample_delcache.sh \
sample_ahbe.conf
testdir = test


@@ -1,14 +1,215 @@
#!/bin/bash -e
#!/bin/bash
#
# Common code for starting an s3fs-fuse mountpoint and an S3Proxy instance
# to run tests against S3Proxy locally.
#
# To run against Amazon S3 or another S3 provider, specify the following
# environment variables:
#
# S3FS_CREDENTIALS_FILE=keyfile          s3fs-format key file
# TEST_BUCKET_1=bucketname               Name of the bucket to use
# S3PROXY_BINARY=""                      Specify an empty string to skip the S3Proxy start
# S3_URL="http://s3.amazonaws.com"       Specify Amazon AWS as the S3 provider
#
# Example of running against Amazon S3 using a bucket named "bucket":
#
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="http://s3.amazonaws.com" ./small-integration-test.sh
#
# To change the s3fs-fuse debug level:
#
# DBGLEVEL=debug ./small-integration-test.sh
#
# To stop and wait after the mount point is up, for manual interaction. This lets you
# explore the mounted file system exactly as it was started for the test case:
#
# INTERACT=1 DBGLEVEL=debug ./small-integration-test.sh
#
# To run all of the tests from the makefile:
#
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="http://s3.amazonaws.com" make check
#
# To run the tests with request auth turned off in both S3Proxy and s3fs-fuse, which can be
# useful for poking around with plain old curl:
#
# PUBLIC=1 INTERACT=1 ./small-integration-test.sh
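#
#   For example (illustrative), with PUBLIC=1 and S3Proxy on its default
#   endpoint, a bucket listing can be fetched without signing the request:
#
#       curl -s "http://127.0.0.1:8080/${TEST_BUCKET_1}/"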
#
# A valgrind tool can be specified
# eg: VALGRIND="--tool=memcheck --leak-check=full" ./small-integration-test.sh
set -o errexit
S3FS=../src/s3fs
S3FS_CREDENTIALS_FILE=$(eval echo ~${SUDO_USER}/.passwd-s3fs)
# Allow these defaulted values to be overridden
: ${S3_URL:="http://127.0.0.1:8080"}
: ${S3FS_CREDENTIALS_FILE:="passwd-s3fs"}
: ${TEST_BUCKET_1:="s3fs-integration-test"}
TEST_BUCKET_1=${SUDO_USER}-s3fs-integration-test
TEST_BUCKET_MOUNT_POINT_1=/mnt/${TEST_BUCKET_1}
export TEST_BUCKET_1
export S3_URL
export TEST_SCRIPT_DIR=`pwd`
export TEST_BUCKET_MOUNT_POINT_1=${TEST_BUCKET_1}
S3PROXY_VERSION="1.5.2"
S3PROXY_BINARY=${S3PROXY_BINARY-"s3proxy-${S3PROXY_VERSION}"}
if [ ! -f "$S3FS_CREDENTIALS_FILE" ]
then
echo "Missing credentials file: $S3FS_CREDENTIALS_FILE"
exit 1
fi
chmod 600 "$S3FS_CREDENTIALS_FILE"
if [ ! -d $TEST_BUCKET_MOUNT_POINT_1 ]
then
mkdir -p $TEST_BUCKET_MOUNT_POINT_1
fi
# This function executes its arguments up to $1 times
# before giving up, with a 1 second delay between attempts.
function retry {
    set +o errexit
    N=$1; shift;
    status=0
    for i in $(seq $N); do
        echo "Trying: $@"
        $@
        status=$?
        if [ $status == 0 ]; then
            break
        fi
        sleep 1
        echo "Retrying: $@"
    done

    if [ $status != 0 ]; then
        echo "timeout waiting for $@"
    fi
    set -o errexit
    return $status
}
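
# Usage sketch (illustrative): wait up to 5 seconds for the mount to show up
#
#   retry 5 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts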
# Proxy is not started if S3PROXY_BINARY is an empty string
# PUBLIC unset: use s3proxy.conf
# PUBLIC=1: use s3proxy-noauth.conf (no request signing)
#
function start_s3proxy {
    if [ -n "${PUBLIC}" ]; then
        S3PROXY_CONFIG="s3proxy-noauth.conf"
    else
        S3PROXY_CONFIG="s3proxy.conf"
    fi

    if [ -n "${S3PROXY_BINARY}" ]
    then
        if [ ! -e "${S3PROXY_BINARY}" ]; then
            wget "https://github.com/andrewgaul/s3proxy/releases/download/s3proxy-${S3PROXY_VERSION}/s3proxy" \
                --quiet -O "${S3PROXY_BINARY}"
            chmod +x "${S3PROXY_BINARY}"
        fi

        stdbuf -oL -eL java -jar "$S3PROXY_BINARY" --properties $S3PROXY_CONFIG | stdbuf -oL -eL sed -u "s/^/s3proxy: /" &

        # wait for S3Proxy to start
        for i in $(seq 30);
        do
            if exec 3<>"/dev/tcp/127.0.0.1/8080";
            then
                exec 3<&-  # Close for read
                exec 3>&-  # Close for write
                break
            fi
            sleep 1
        done

        S3PROXY_PID=$(netstat -lpnt | grep :8080 | awk '{ print $7 }' | sed -u 's|/java||')
    fi
}
function stop_s3proxy {
    if [ -n "${S3PROXY_PID}" ]
    then
        kill $S3PROXY_PID
        wait $S3PROXY_PID
    fi
}
# Mount the bucket; any function arguments are passed to s3fs in addition
# to a set of common arguments.
function start_s3fs {
    # Public bucket if PUBLIC is set
    if [ -n "${PUBLIC}" ]; then
        AUTH_OPT="-o public_bucket=1"
    else
        AUTH_OPT="-o passwd_file=${S3FS_CREDENTIALS_FILE}"
    fi

    # If VALGRIND is set, its value is passed as options to valgrind;
    # start valgrind-listener in another shell first (default port is 1500).
    # eg: VALGRIND="--tool=memcheck --leak-check=full" ./small-integration-test.sh
    if [ -n "${VALGRIND}" ]; then
        VALGRIND_EXEC="valgrind ${VALGRIND} --log-socket=127.0.1.1"
    fi

    # Common s3fs options:
    #
    # TODO: Allow all these options to be overridden with env variables
    #
    # use_path_request_style
    #     The test env doesn't have virtual hosts
    # createbucket
    #     S3Proxy always starts with no buckets, this tests the s3fs-fuse
    #     automatic bucket creation path.
    # $AUTH_OPT
    #     Will be either "-o public_bucket=1"
    #     or
    #     "-o passwd_file=${S3FS_CREDENTIALS_FILE}"
    # dbglevel
    #     error by default. override with DBGLEVEL env variable
    # -f
    #     Keep s3fs in foreground instead of daemonizing
    #

    # subshell with set -x to log the exact invocation of s3fs-fuse
    (
        set -x
        stdbuf -oL -eL \
            ${VALGRIND_EXEC} ${S3FS} \
            $TEST_BUCKET_1 \
            $TEST_BUCKET_MOUNT_POINT_1 \
            -o use_path_request_style \
            -o url=${S3_URL} \
            -o createbucket \
            ${AUTH_OPT} \
            -o dbglevel=${DBGLEVEL:=info} \
            -f \
            ${@} \
            |& stdbuf -oL -eL sed -u "s/^/s3fs: /" &
    )

    retry 5 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts || exit 1

    # Quick way to start the system up for manual testing with the options under test
    if [[ -n ${INTERACT} ]]; then
        echo "Mountpoint $TEST_BUCKET_MOUNT_POINT_1 is ready"
        echo "control-C to quit"
        sleep infinity
        exit 0
    fi
}
function stop_s3fs {
    # Retry in case file system is in use
    if grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts; then
        retry 10 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts && fusermount -u $TEST_BUCKET_MOUNT_POINT_1
    fi
}
# trap handlers do not stack. If a test sets its own, the new handler should call common_exit_handler
function common_exit_handler {
    stop_s3proxy
    stop_s3fs
}
trap common_exit_handler EXIT
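
# Example (illustrative): a test script that needs its own cleanup should chain
# the common handler rather than replace it, e.g.:
#
#   function test_exit_handler {
#       rm -f "${SCRATCH_FILE}"   # hypothetical per-test cleanup
#       common_exit_handler
#   }
#   trap test_exit_handler EXIT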

test/integration-test-main.sh (new executable file, 426 lines)

@@ -0,0 +1,426 @@
#!/bin/bash
set -o errexit
source test-utils.sh
function test_append_file {
    describe "Testing append to file ..."
    # Write a small test file
    for x in `seq 1 $TEST_TEXT_FILE_LENGTH`
    do
        echo "echo ${TEST_TEXT} to ${TEST_TEXT_FILE}"
    done > ${TEST_TEXT_FILE}

    # Verify contents of file
    echo "Verifying length of test file"
    FILE_LENGTH=`wc -l $TEST_TEXT_FILE | awk '{print $1}'`
    if [ "$FILE_LENGTH" -ne "$TEST_TEXT_FILE_LENGTH" ]
    then
        echo "error: expected $TEST_TEXT_FILE_LENGTH, got $FILE_LENGTH"
        return 1
    fi

    rm_test_file
}
function test_truncate_file {
    describe "Testing truncate file ..."
    # Write a small test file
    echo "${TEST_TEXT}" > ${TEST_TEXT_FILE}

    # Truncate file to 0 length. This should trigger open(path, O_RDWR | O_TRUNC...)
    : > ${TEST_TEXT_FILE}

    # Verify file is zero length
    if [ -s ${TEST_TEXT_FILE} ]
    then
        echo "error: expected ${TEST_TEXT_FILE} to be zero length"
        return 1
    fi
    rm_test_file
}
function test_truncate_empty_file {
    describe "Testing truncate empty file ..."
    # Write an empty test file
    touch ${TEST_TEXT_FILE}

    # Truncate the file to 1024 bytes
    t_size=1024
    truncate ${TEST_TEXT_FILE} -s $t_size

    # Verify the file is now $t_size bytes
    size=$(stat -c %s ${TEST_TEXT_FILE})
    if [ $t_size -ne $size ]
    then
        echo "error: expected ${TEST_TEXT_FILE} to be $t_size bytes, got $size"
        return 1
    fi
    rm_test_file
}
function test_mv_file {
    describe "Testing mv file function ..."
    # if the rename file exists, delete it
    if [ -e $ALT_TEST_TEXT_FILE ]
    then
        rm $ALT_TEST_TEXT_FILE
    fi

    if [ -e $ALT_TEST_TEXT_FILE ]
    then
        echo "Could not delete file ${ALT_TEST_TEXT_FILE}, it still exists"
        return 1
    fi

    # create the test file again
    mk_test_file

    # rename the test file
    mv $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
    if [ ! -e $ALT_TEST_TEXT_FILE ]
    then
        echo "Could not move file"
        return 1
    fi

    # Check the contents of the alt file
    ALT_TEXT_LENGTH=`echo $TEST_TEXT | wc -c | awk '{print $1}'`
    ALT_FILE_LENGTH=`wc -c $ALT_TEST_TEXT_FILE | awk '{print $1}'`
    if [ "$ALT_FILE_LENGTH" -ne "$ALT_TEXT_LENGTH" ]
    then
        echo "moved file length is not as expected; expected: $ALT_TEXT_LENGTH got: $ALT_FILE_LENGTH"
        return 1
    fi

    # clean up
    rm_test_file $ALT_TEST_TEXT_FILE
}
function test_mv_directory {
    describe "Testing mv directory function ..."
    if [ -e $TEST_DIR ]; then
        echo "Unexpected, this file/directory exists: ${TEST_DIR}"
        return 1
    fi

    mk_test_dir

    mv ${TEST_DIR} ${TEST_DIR}_rename
    if [ ! -d "${TEST_DIR}_rename" ]; then
        echo "Directory ${TEST_DIR} was not renamed"
        return 1
    fi

    rmdir ${TEST_DIR}_rename
    if [ -e "${TEST_DIR}_rename" ]; then
        echo "Could not remove the test directory, it still exists: ${TEST_DIR}_rename"
        return 1
    fi
}

function test_redirects {
    describe "Testing redirects ..."
    mk_test_file ABCDEF

    CONTENT=`cat $TEST_TEXT_FILE`
    if [ "${CONTENT}" != "ABCDEF" ]; then
        echo "CONTENT read is unexpected, got ${CONTENT}, expected ABCDEF"
        return 1
    fi

    echo XYZ > $TEST_TEXT_FILE
    CONTENT=`cat $TEST_TEXT_FILE`
    if [ ${CONTENT} != "XYZ" ]; then
        echo "CONTENT read is unexpected, got ${CONTENT}, expected XYZ"
        return 1
    fi

    echo 123456 >> $TEST_TEXT_FILE
    LINE1=`sed -n '1,1p' $TEST_TEXT_FILE`
    LINE2=`sed -n '2,2p' $TEST_TEXT_FILE`
    if [ ${LINE1} != "XYZ" ]; then
        echo "LINE1 was not as expected, got ${LINE1}, expected XYZ"
        return 1
    fi
    if [ ${LINE2} != "123456" ]; then
        echo "LINE2 was not as expected, got ${LINE2}, expected 123456"
        return 1
    fi

    # clean up
    rm_test_file
}

function test_mkdir_rmdir {
    describe "Testing creation/removal of a directory"
    if [ -e $TEST_DIR ]; then
        echo "Unexpected, this file/directory exists: ${TEST_DIR}"
        return 1
    fi

    mk_test_dir
    rm_test_dir
}

function test_chmod {
    describe "Testing chmod file function ..."
    # create the test file again
    mk_test_file

    ORIGINAL_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
    chmod 777 $TEST_TEXT_FILE

    # if they're the same, we have a problem.
    if [ $(stat --format=%a $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
    then
        echo "Could not modify $TEST_TEXT_FILE permissions"
        return 1
    fi

    # clean up
    rm_test_file
}
function test_chown {
    describe "Testing chown file function ..."
    # create the test file again
    mk_test_file

    ORIGINAL_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
    chown 1000:1000 $TEST_TEXT_FILE

    # if they're the same, we have a problem.
    if [ $(stat --format=%u:%g $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
    then
        if [ $ORIGINAL_PERMISSIONS == "1000:1000" ]
        then
            echo "Skipping strict check because the original ownership was already 1000:1000"
        else
            echo "Could not modify $TEST_TEXT_FILE ownership ($ORIGINAL_PERMISSIONS to 1000:1000)"
            return 1
        fi
    fi

    # clean up
    rm_test_file
}
function test_list {
    describe "Testing list"
    mk_test_file
    mk_test_dir

    file_cnt=$(ls -1 | wc -l)
    if [ $file_cnt != 2 ]; then
        echo "Expected 2 files but got $file_cnt"
        return 1
    fi

    rm_test_file
    rm_test_dir
}
function test_remove_nonempty_directory {
    describe "Testing removing a non-empty directory"
    mk_test_dir
    touch "${TEST_DIR}/file"
    rmdir "${TEST_DIR}" 2>&1 | grep -q "Directory not empty"
    rm "${TEST_DIR}/file"
    rm_test_dir
}

function test_rename_before_close {
    describe "Testing rename before close ..."
    (
        echo foo
        mv $TEST_TEXT_FILE ${TEST_TEXT_FILE}.new
    ) > $TEST_TEXT_FILE

    if ! cmp <(echo foo) ${TEST_TEXT_FILE}.new; then
        echo "rename before close failed"
        return 1
    fi

    rm_test_file ${TEST_TEXT_FILE}.new
    rm -f ${TEST_TEXT_FILE}
}

function test_multipart_upload {
    describe "Testing multi-part upload ..."
    dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
    dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1

    # Verify contents of file
    echo "Comparing test file"
    if ! cmp "/tmp/${BIG_FILE}" "${BIG_FILE}"
    then
        return 1
    fi

    rm -f "/tmp/${BIG_FILE}"
    rm_test_file "${BIG_FILE}"
}

function test_multipart_copy {
    describe "Testing multi-part copy ..."
    dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
    dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
    mv "${BIG_FILE}" "${BIG_FILE}-copy"

    # Verify contents of file
    echo "Comparing test file"
    if ! cmp "/tmp/${BIG_FILE}" "${BIG_FILE}-copy"
    then
        return 1
    fi

    rm -f "/tmp/${BIG_FILE}"
    rm_test_file "${BIG_FILE}-copy"
}
function test_special_characters {
    describe "Testing special characters ..."
    ls 'special' 2>&1 | grep -q 'No such file or directory'
    ls 'special?' 2>&1 | grep -q 'No such file or directory'
    ls 'special*' 2>&1 | grep -q 'No such file or directory'
    ls 'special~' 2>&1 | grep -q 'No such file or directory'
    ls 'specialµ' 2>&1 | grep -q 'No such file or directory'
}

function test_symlink {
    describe "Testing symlinks ..."
    rm -f $TEST_TEXT_FILE
    rm -f $ALT_TEST_TEXT_FILE
    echo foo > $TEST_TEXT_FILE

    ln -s $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
    cmp $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE

    rm -f $TEST_TEXT_FILE

    [ -L $ALT_TEST_TEXT_FILE ]
    [ ! -f $ALT_TEST_TEXT_FILE ]
}

function test_extended_attributes {
    command -v setfattr >/dev/null 2>&1 || \
        { echo "Skipping extended attribute tests" ; return; }

    describe "Testing extended attributes ..."
    rm -f $TEST_TEXT_FILE
    touch $TEST_TEXT_FILE

    # set value
    setfattr -n key1 -v value1 $TEST_TEXT_FILE
    getfattr -n key1 --only-values $TEST_TEXT_FILE | grep -q '^value1$'

    # append value
    setfattr -n key2 -v value2 $TEST_TEXT_FILE
    getfattr -n key1 --only-values $TEST_TEXT_FILE | grep -q '^value1$'
    getfattr -n key2 --only-values $TEST_TEXT_FILE | grep -q '^value2$'

    # remove value
    setfattr -x key1 $TEST_TEXT_FILE
    ! getfattr -n key1 --only-values $TEST_TEXT_FILE
    getfattr -n key2 --only-values $TEST_TEXT_FILE | grep -q '^value2$'
}
function test_mtime_file {
    describe "Testing mtime preservation function ..."

    # if the rename file exists, delete it
    if [ -e $ALT_TEST_TEXT_FILE -o -L $ALT_TEST_TEXT_FILE ]
    then
        rm $ALT_TEST_TEXT_FILE
    fi

    if [ -e $ALT_TEST_TEXT_FILE ]
    then
        echo "Could not delete file ${ALT_TEST_TEXT_FILE}, it still exists"
        return 1
    fi

    # create the test file again
    mk_test_file
    sleep 2 # allow for some time to pass to compare the timestamps between test & alt

    # copy the test file with preserve mode
    cp -p $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
    testmtime=`stat -c %Y $TEST_TEXT_FILE`
    altmtime=`stat -c %Y $ALT_TEST_TEXT_FILE`
    if [ "$testmtime" -ne "$altmtime" ]
    then
        echo "File times do not match: $testmtime != $altmtime"
        return 1
    fi
}

function test_rm_rf_dir {
    describe "Test that rm -rf will remove directory with contents"
    # Create a dir with some files and directories
    mkdir dir1
    mkdir dir1/dir2
    touch dir1/file1
    touch dir1/dir2/file2

    # Remove the dir with recursive rm
    rm -rf dir1

    if [ -e dir1 ]; then
        echo "rm -rf did not remove $PWD/dir1"
        return 1
    fi
}

function test_write_after_seek_ahead {
    describe "Test writes succeed after a seek ahead"
    dd if=/dev/zero of=testfile seek=1 count=1 bs=1024
    rm testfile
}

function add_all_tests {
    add_tests test_append_file
    add_tests test_truncate_file
    add_tests test_truncate_empty_file
    add_tests test_mv_file
    add_tests test_mv_directory
    add_tests test_redirects
    add_tests test_mkdir_rmdir
    add_tests test_chmod
    add_tests test_chown
    add_tests test_list
    add_tests test_remove_nonempty_directory
    # TODO: broken: https://github.com/s3fs-fuse/s3fs-fuse/issues/145
    #add_tests test_rename_before_close
    add_tests test_multipart_upload
    add_tests test_multipart_copy
    add_tests test_special_characters
    add_tests test_symlink
    add_tests test_extended_attributes
    add_tests test_mtime_file
    add_tests test_rm_rf_dir
    add_tests test_write_after_seek_ahead
}
init_suite
add_all_tests
run_suite

test/passwd-s3fs (new file, 1 line)

@@ -0,0 +1 @@
local-identity:local-credential

test/s3proxy.conf (new file, 8 lines)

@@ -0,0 +1,8 @@
s3proxy.endpoint=http://127.0.0.1:8080
s3proxy.authorization=aws-v4
s3proxy.identity=local-identity
s3proxy.credential=local-credential
jclouds.provider=transient
jclouds.identity=remote-identity
jclouds.credential=remote-credential


@@ -1,24 +1,30 @@
# S3FS: Samlpe ahbe_conf parameter file.
# S3FS: Sample ahbe_conf parameter file.
#
# This file is the configuration file for additional headers by extension (ahbe).
# s3fs loads this file at startup.
#
# Format:
# line = [file suffix] HTTP-header [HTTP-header-values]
# line = [file suffix or regex] HTTP-header [HTTP-header-values]
# file suffix = file(object) suffix, if this field is empty,
# it means "*"(all object).
# it means "reg:(.*)".(=all object).
# regex = regular expression to match the file(object) path.
# this type starts with "reg:" prefix.
# HTTP-header = additional HTTP header name
# HTTP-header-values = additional HTTP header value
#
# <suffix(extension)> <HTTP header> <HTTP header values>
#
# Verification is done in the order in which they are described in the file.
# That order is very important.
#
# Example:
# " Content-Encoding gzip" --> all object
# ".gz Content-Encoding gzip" --> only ".gz" extension file
# " Content-Encoding gzip" --> all object
# ".gz Content-Encoding gzip" --> only ".gz" extension file
# "reg:^/DIR/(.*).t2$ Content-Encoding text2" --> "/DIR/*.t2" extension file
#
# Notice:
# If you need to set all object, you can specify without "suffix".
# Then all of object(file) is added additional header.
# If you need to match all objects, you can specify no "suffix" or the regex
# type "reg:(.*)". Then the additional header is added to every object (file).
# If you have this configuration file for Content-Encoding, you should
# know about RFC 2616.
#
@@ -27,15 +33,20 @@
# Encoding header, and SHOULD NOT be used in the Content-Encoding
# header."
#
.gz Content-Encoding gzip
.Z Content-Encoding compress
.bz2 Content-Encoding bzip2
.svgz Content-Encoding gzip
.svg.gz Content-Encoding gzip
.tgz Content-Encoding gzip
.tar.gz Content-Encoding gzip
.taz Content-Encoding gzip
.tz Content-Encoding gzip
.tbz2 Content-Encoding gzip
gz.js Content-Encoding gzip
# file suffix type
.gz Content-Encoding gzip
.Z Content-Encoding compress
.bz2 Content-Encoding bzip2
.svgz Content-Encoding gzip
.svg.gz Content-Encoding gzip
.tgz Content-Encoding gzip
.tar.gz Content-Encoding gzip
.taz Content-Encoding gzip
.tz Content-Encoding gzip
.tbz2 Content-Encoding gzip
gz.js Content-Encoding gzip
# regex type(test)
reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2


@@ -1,284 +1,30 @@
#!/bin/bash -e
COMMON=integration-test-common.sh
source $COMMON
#!/bin/bash
#
# Test s3fs-fuse file system operations with S3Proxy
#
set -o errexit
# Require root
REQUIRE_ROOT=require-root.sh
source $REQUIRE_ROOT
#source $REQUIRE_ROOT
# Configuration
TEST_TEXT="HELLO WORLD"
TEST_TEXT_FILE=test-s3fs.txt
TEST_DIR=testdir
ALT_TEST_TEXT_FILE=test-s3fs-ALT.txt
TEST_TEXT_FILE_LENGTH=15
source integration-test-common.sh
# Mount the bucket
if [ ! -d $TEST_BUCKET_MOUNT_POINT_1 ]
then
mkdir -p $TEST_BUCKET_MOUNT_POINT_1
fi
$S3FS $TEST_BUCKET_1 $TEST_BUCKET_MOUNT_POINT_1 -o passwd_file=$S3FS_CREDENTIALS_FILE
CUR_DIR=`pwd`
cd $TEST_BUCKET_MOUNT_POINT_1
start_s3proxy
if [ -e $TEST_TEXT_FILE ]
then
rm -f $TEST_TEXT_FILE
fi
#
# enable_content_md5
#     Causes s3fs to validate file contents. This isn't included in the common
#     options used by start_s3fs because some tests may be performance tests.
# singlepart_copy_limit
#     Appeared in upstream s3fs-fuse tests, possibly a limitation of S3Proxy.
#     TODO: github archaeology to see why it was added.
#
start_s3fs -o enable_content_md5 \
    -o singlepart_copy_limit=$((10 * 1024))
# Write a small test file
for x in `seq 1 $TEST_TEXT_FILE_LENGTH`
do
echo "echo ${TEST_TEXT} to ${TEST_TEXT_FILE}"
echo $TEST_TEXT >> $TEST_TEXT_FILE
done
./integration-test-main.sh
# Verify contents of file
echo "Verifying length of test file"
FILE_LENGTH=`wc -l $TEST_TEXT_FILE | awk '{print $1}'`
if [ "$FILE_LENGTH" -ne "$TEST_TEXT_FILE_LENGTH" ]
then
echo "error: expected $TEST_TEXT_FILE_LENGTH , got $FILE_LENGTH"
exit 1
fi
# Delete the test file
rm $TEST_TEXT_FILE
if [ -e $TEST_TEXT_FILE ]
then
echo "Could not delete file, it still exists"
exit 1
fi
##########################################################
# Rename test (individual file)
##########################################################
echo "Testing mv file function ..."
# if the rename file exists, delete it
if [ -e $ALT_TEST_TEXT_FILE ]
then
rm $ALT_TEST_TEXT_FILE
fi
if [ -e $ALT_TEST_TEXT_FILE ]
then
echo "Could not delete file ${ALT_TEST_TEXT_FILE}, it still exists"
exit 1
fi
# create the test file again
echo $TEST_TEXT > $TEST_TEXT_FILE
if [ ! -e $TEST_TEXT_FILE ]
then
echo "Could not create file ${TEST_TEXT_FILE}, it does not exist"
exit 1
fi
#rename the test file
mv $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
if [ ! -e $ALT_TEST_TEXT_FILE ]
then
echo "Could not move file"
exit 1
fi
# Check the contents of the alt file
ALT_TEXT_LENGTH=`echo $TEST_TEXT | wc -c | awk '{print $1}'`
ALT_FILE_LENGTH=`wc -c $ALT_TEST_TEXT_FILE | awk '{print $1}'`
if [ "$ALT_FILE_LENGTH" -ne "$ALT_TEXT_LENGTH" ]
then
echo "moved file length is not as expected expected: $ALT_TEXT_LENGTH got: $ALT_FILE_LENGTH"
exit 1
fi
# clean up
rm $ALT_TEST_TEXT_FILE
if [ -e $ALT_TEST_TEXT_FILE ]
then
echo "Could not cleanup file ${ALT_TEST_TEXT_FILE}, it still exists"
exit 1
fi
##########################################################
# Rename test (individual directory)
##########################################################
echo "Testing mv directory function ..."
if [ -e $TEST_DIR ]; then
echo "Unexpected, this file/directory exists: ${TEST_DIR}"
exit 1
fi
mkdir ${TEST_DIR}
if [ ! -d ${TEST_DIR} ]; then
echo "Directory ${TEST_DIR} was not created"
exit 1
fi
mv ${TEST_DIR} ${TEST_DIR}_rename
if [ ! -d "${TEST_DIR}_rename" ]; then
echo "Directory ${TEST_DIR} was not renamed"
exit 1
fi
rmdir ${TEST_DIR}_rename
if [ -e "${TEST_DIR}_rename" ]; then
echo "Could not remove the test directory, it still exists: ${TEST_DIR}_rename"
exit 1
fi
###################################################################
# test redirects > and >>
###################################################################
echo "Testing redirects ..."
echo ABCDEF > $TEST_TEXT_FILE
if [ ! -e $TEST_TEXT_FILE ]
then
echo "Could not create file ${TEST_TEXT_FILE}, it does not exist"
exit 1
fi
CONTENT=`cat $TEST_TEXT_FILE`
if [ ${CONTENT} != "ABCDEF" ]; then
echo "CONTENT read is unexpected, got ${CONTENT}, expected ABCDEF"
exit 1
fi
echo XYZ > $TEST_TEXT_FILE
CONTENT=`cat $TEST_TEXT_FILE`
if [ ${CONTENT} != "XYZ" ]; then
echo "CONTENT read is unexpected, got ${CONTENT}, expected XYZ"
exit 1
fi
echo 123456 >> $TEST_TEXT_FILE
LINE1=`sed -n '1,1p' $TEST_TEXT_FILE`
LINE2=`sed -n '2,2p' $TEST_TEXT_FILE`
if [ ${LINE1} != "XYZ" ]; then
echo "LINE1 was not as expected, got ${LINE1}, expected XYZ"
exit 1
fi
if [ ${LINE2} != "123456" ]; then
echo "LINE2 was not as expected, got ${LINE2}, expected 123456"
exit 1
fi
# clean up
rm $TEST_TEXT_FILE
if [ -e $TEST_TEXT_FILE ]
then
echo "Could not cleanup file ${TEST_TEXT_FILE}, it still exists"
exit 1
fi
#####################################################################
# Simple directory test mkdir/rmdir
#####################################################################
echo "Testing creation/removal of a directory"
if [ -e $TEST_DIR ]; then
echo "Unexpected, this file/directory exists: ${TEST_DIR}"
exit 1
fi
mkdir ${TEST_DIR}
if [ ! -d ${TEST_DIR} ]; then
echo "Directory ${TEST_DIR} was not created"
exit 1
fi
rmdir ${TEST_DIR}
if [ -e $TEST_DIR ]; then
echo "Could not remove the test directory, it still exists: ${TEST_DIR}"
exit 1
fi
##########################################################
# File permissions test (individual file)
##########################################################
echo "Testing chmod file function ..."
# create the test file again
echo $TEST_TEXT > $TEST_TEXT_FILE
if [ ! -e $TEST_TEXT_FILE ]
then
echo "Could not create file ${TEST_TEXT_FILE}"
exit 1
fi
ORIGINAL_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
chmod 777 $TEST_TEXT_FILE;
# if they're the same, we have a problem.
if [ $(stat --format=%a $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
then
echo "Could not modify $TEST_TEXT_FILE permissions"
exit 1
fi
# clean up
rm $TEST_TEXT_FILE
if [ -e $TEST_TEXT_FILE ]
then
echo "Could not cleanup file ${TEST_TEXT_FILE}"
exit 1
fi
##########################################################
# File permissions test (individual file)
##########################################################
echo "Testing chown file function ..."
# create the test file again
echo $TEST_TEXT > $TEST_TEXT_FILE
if [ ! -e $TEST_TEXT_FILE ]
then
echo "Could not create file ${TEST_TEXT_FILE}"
exit 1
fi
ORIGINAL_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
chown 1000:1000 $TEST_TEXT_FILE;
# if they're the same, we have a problem.
if [ $(stat --format=%a $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
then
echo "Could not modify $TEST_TEXT_FILE ownership"
exit 1
fi
# clean up
rm $TEST_TEXT_FILE
if [ -e $TEST_TEXT_FILE ]
then
echo "Could not cleanup file ${TEST_TEXT_FILE}"
exit 1
fi
#####################################################################
# Tests are finished
#####################################################################
# Unmount the bucket
cd $CUR_DIR
umount $TEST_BUCKET_MOUNT_POINT_1
echo "All tests complete."
echo "$0: tests complete."

test/test-utils.sh (new file, 156 lines)

@@ -0,0 +1,156 @@
#### Test utils
set -o errexit
# Configuration
TEST_TEXT="HELLO WORLD"
TEST_TEXT_FILE=test-s3fs.txt
TEST_DIR=testdir
ALT_TEST_TEXT_FILE=test-s3fs-ALT.txt
TEST_TEXT_FILE_LENGTH=15
BIG_FILE=big-file-s3fs.txt
BIG_FILE_LENGTH=$((25 * 1024 * 1024))
export RUN_DIR
function mk_test_file {
    if [ $# == 0 ]; then
        TEXT=$TEST_TEXT
    else
        TEXT=$1
    fi
    echo $TEXT > $TEST_TEXT_FILE
    if [ ! -e $TEST_TEXT_FILE ]
    then
        echo "Could not create file ${TEST_TEXT_FILE}, it does not exist"
        exit 1
    fi
}

function rm_test_file {
    if [ $# == 0 ]; then
        FILE=$TEST_TEXT_FILE
    else
        FILE=$1
    fi
    rm -f $FILE
    if [ -e $FILE ]
    then
        echo "Could not cleanup file ${FILE}"
        exit 1
    fi
}
function mk_test_dir {
    mkdir ${TEST_DIR}
    if [ ! -d ${TEST_DIR} ]; then
        echo "Directory ${TEST_DIR} was not created"
        exit 1
    fi
}

function rm_test_dir {
    rmdir ${TEST_DIR}
    if [ -e $TEST_DIR ]; then
        echo "Could not remove the test directory, it still exists: ${TEST_DIR}"
        exit 1
    fi
}
# Create and cd to a unique directory for this test run
# Sets RUN_DIR to the name of the created directory
function cd_run_dir {
    if [ "$TEST_BUCKET_MOUNT_POINT_1" == "" ]; then
        echo "TEST_BUCKET_MOUNT_POINT_1 variable not set"
        exit 1
    fi
    RUN_DIR=$(mktemp --directory ${TEST_BUCKET_MOUNT_POINT_1}/testrun-XXXXXX)
    cd ${RUN_DIR}
}

function clean_run_dir {
    if [ -d ${RUN_DIR} ]; then
        rm -rf ${RUN_DIR} || echo "Error removing ${RUN_DIR}"
    fi
}
# Resets the test suite
function init_suite {
    TEST_LIST=()
    TEST_FAILED_LIST=()
    TEST_PASSED_LIST=()
}

# Report a passing test case
#   report_pass TEST_NAME
function report_pass {
    echo "$1 passed"
    TEST_PASSED_LIST+=($1)
}

# Report a failing test case
#   report_fail TEST_NAME
function report_fail {
    echo "$1 failed"
    TEST_FAILED_LIST+=($1)
}

# Add tests to the suite
#   add_tests TEST_NAME...
function add_tests {
    TEST_LIST+=("$@")
}

# Log test name and description
#   describe [DESCRIPTION]
function describe {
    echo "${FUNCNAME[1]}: $*"
}
# Runs each test in a suite and summarizes results. The list of
# tests added by add_tests() is called with CWD set to a tmp
# directory in the bucket. An attempt to clean this directory is
# made after the test run.
function run_suite {
    orig_dir=$PWD
    cd_run_dir
    for t in "${TEST_LIST[@]}"; do
        # The following sequence runs tests in a subshell to allow continuation
        # on test failure, but still allowing errexit to be in effect during
        # the test.
        #
        # See:
        # https://groups.google.com/d/msg/gnu.bash.bug/NCK_0GmIv2M/dkeZ9MFhPOIJ
        # Other ways of trying to capture the return value will also disable
        # errexit in the function due to bash... compliance with POSIX?
        set +o errexit
        (set -o errexit; $t)
        if [[ $? == 0 ]]; then
            report_pass $t
        else
            report_fail $t
        fi
        set -o errexit
    done
    cd ${orig_dir}
    clean_run_dir

    for t in "${TEST_PASSED_LIST[@]}"; do
        echo "PASS: $t"
    done
    for t in "${TEST_FAILED_LIST[@]}"; do
        echo "FAIL: $t"
    done

    passed=${#TEST_PASSED_LIST[@]}
    failed=${#TEST_FAILED_LIST[@]}
    echo "SUMMARY for $0: $passed tests passed. $failed tests failed."

    if [[ $failed != 0 ]]; then
        return 1
    else
        return 0
    fi
}
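
A minimal sketch of a consumer of these utilities (illustrative; integration-test-main.sh above is the real example in this change):

    #!/bin/bash
    set -o errexit
    source test-utils.sh

    function test_example {
        describe "An example test ..."
        mk_test_file
        rm_test_file
    }

    init_suite
    add_tests test_example
    run_suite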