231 Commits
v1.79 ... v1.80

Author SHA1 Message Date
36917f7780 Merge pull request #426 from s3fs-fuse/ggtakec-patch-directly
Updated to correct ChangeLog
2016-05-29 12:40:28 +09:00
fe44f81ef2 Updated to correct ChangeLog
Because there was an extra line break
2016-05-29 12:37:16 +09:00
a81a2091c3 Merge pull request #425 from ggtakec/master
Updated ChangeLog and configure.ac for release 1.80
2016-05-29 12:29:06 +09:00
88d6c20cde Updated ChangeLog and configure.ac for release 1.80 2016-05-29 03:19:02 +00:00
4ff41f2ebf Merge pull request #424 from ggtakec/master
Added travis CI badge in README.md
2016-05-29 12:02:19 +09:00
a7d2148c60 Added travis CI badge in README.md
Added travis CI badge in README.md.
2016-05-29 11:40:58 +09:00
980c0f81dd Merge pull request #422 from nturner/fix/iam-role-auto
Fixes for iam_role=auto
2016-05-29 09:22:58 +09:00
775e493b0a Merge pull request #420 from nturner/master
Skip early credential checks when iam_role=auto
2016-05-29 09:11:24 +09:00
584ea488bf Use role name instead of profile name when iam_role=auto
When using an instance with an IAM role, transient credentials can be
found in http://169.254.169.254/latest/meta-data/ at
iam/security-credentials/role-name, and s3fs attempts this lookup. However,
it uses the profile name where the role name is needed. In many cases
the role and profile names are the same, but not always.

The simplest way to find the role name appears to be to GET
http://169.254.169.254/latest/meta-data/iam/security-credentials/
itself, which returns a listing of the role names for which temporary
credentials exist. (There will probably only be one, but we split on
newlines and take the first one in case that assumption is not valid.)
This is the approach the AWS SDK appears to use (based on Wireshark
analysis).

Bug: https://github.com/s3fs-fuse/s3fs-fuse/issues/421
Signed-off-by: Nathaniel W. Turner <nate@houseofnate.net>
2016-05-24 13:34:19 -04:00
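A minimal sketch of this lookup, assuming libcurl is available; the helper names here are illustrative, not s3fs's actual functions:
```
// Sketch: find the IAM role name from the EC2 instance metadata
// service, then fetch that role's temporary credentials.
#include <curl/curl.h>
#include <iostream>
#include <string>

static size_t append_body(char* ptr, size_t size, size_t nmemb, void* userdata)
{
  static_cast<std::string*>(userdata)->append(ptr, size * nmemb);
  return size * nmemb;
}

static bool http_get(const std::string& url, std::string& body)
{
  CURL* curl = curl_easy_init();
  if(!curl) return false;
  curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
  curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, append_body);
  curl_easy_setopt(curl, CURLOPT_WRITEDATA, &body);
  CURLcode res = curl_easy_perform(curl);
  curl_easy_cleanup(curl);
  return res == CURLE_OK;
}

int main()
{
  const std::string base = "http://169.254.169.254/latest/meta-data/iam/security-credentials/";
  std::string listing;
  if(!http_get(base, listing)) return 1;
  // One role name per line; take the first in case there are several.
  const std::string role = listing.substr(0, listing.find('\n'));
  std::string creds;
  if(!http_get(base + role, creds)) return 1;   // JSON with temporary keys
  std::cout << "role: " << role << "\n" << creds << std::endl;
  return 0;
}
```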
594c9ca7d2 Skip early credential checks when iam_role=auto
If user specifies iam_role=auto (or just iam_role), credentials will not
be loaded during early phase, so skip credential checks there.

Signed-off-by: Nathaniel W. Turner <nate@houseofnate.net>
2016-05-20 12:49:02 -04:00
c2b7a7e453 Merge pull request #415 from ggtakec/master
Fixed a bug about stat_cache_expire - #382
2016-05-14 18:14:56 +09:00
34b604cdfe Fixed a bug about stat_cache_expire - #382 2016-05-14 09:03:52 +00:00
d16d616f34 Merge pull request #411 from ggtakec/master
Loading IAM role name automatically (iam_role option) - #387
2016-05-06 13:57:32 +09:00
50f1ad51c8 Loading IAM role name automatically (iam_role option) - #387 2016-05-06 04:37:32 +00:00
fe253c3d22 Merge pull request #410 from ggtakec/master
Allow duplicate key in ahbe_conf - #386
2016-05-06 10:21:50 +09:00
6cc30eea44 Allow duplicate key in ahbe_conf - #386 2016-05-06 01:08:39 +00:00
6be264a17f Merge pull request #409 from ggtakec/master
Fixed 'load_sse_c' option not working - #388
2016-05-06 09:47:39 +09:00
1ddbd4d6bb Fixed 'load_sse_c' option not working - #388 2016-05-06 00:36:54 +00:00
845fdb43f2 Merge pull request #404 from rockuw/keep-alive
Add curl handler pool to reuse connections
2016-04-26 23:40:45 +09:00
72f6c4d2dc Merge pull request #403 from rockuw/master
Fix a bug when truncating an empty file
2016-04-26 23:35:22 +09:00
cf23dc78ab Use 'return' instead of 'exit' in test 2016-04-22 16:24:26 +08:00
b78adb4bb0 Add curl handler pool to reuse connections 2016-04-22 14:57:31 +08:00
115bd51f3f Fix a bug when truncating an empty file 2016-04-22 14:49:37 +08:00
b979d40778 Merge pull request #397 from ggtakec/master
Supported User-Agent header - #383
2016-04-17 17:01:59 +09:00
10589a9497 Supported User-Agent header - #383 2016-04-17 07:44:03 +00:00
2f5973c02b Merge pull request #395 from ggtakec/master
Fixed writing sparse file - #375,#379,#394
2016-04-13 03:34:44 +09:00
090c37a1c1 Fixed writing sparse file - #375,#379,#394 2016-04-12 18:24:36 +00:00
d048f380c1 Merge pull request #394 from s3fs-fuse/revert-379-master
Revert "Fixed a bug about writing sparsed file - #375"
2016-04-13 01:30:34 +09:00
fff40bbff3 Revert "Fixed a bug about writing sparse file - #375" 2016-04-13 01:24:24 +09:00
daef00e38b Merge pull request #391 from dreh23/patch-1
Update s3fs.1
2016-04-10 15:39:21 +09:00
4ca1b90d00 Merge pull request #385 from mapreri/typo
fix typo in curl.cpp: s/returing/returning/

@mapreri thanks!
2016-04-10 14:02:57 +09:00
c5691b6c7c Merge pull request #376 from RobbKistler/seek-test
Test for writing after an lseek past end of file
2016-04-10 13:00:10 +09:00
fb2ee7cc02 Update s3fs.1 2016-04-09 00:31:01 +02:00
136ec654c2 fix typo in curl.cpp: s/returing/returning/ 2016-04-02 15:19:06 +00:00
4e583583cd Test for writing after an lseek past end of file
This is a test to demonstrate Issue #375
2016-03-23 16:03:38 -07:00
91861e7fcd Merge pull request #379 from ggtakec/master
Fixed a bug about writing sparse file - #375
2016-03-22 15:06:31 +09:00
ded4faf2e4 Fixed a bug about writing sparse file - #375 2016-03-22 05:44:14 +00:00
cf56b35766 Merge pull request #372 from ggtakec/master
Fixed a bug about etag comparison in stats cache, etc.
2016-03-13 18:59:38 +09:00
98d55582eb Changed constructor/destructor in cache.h 2016-03-13 09:47:37 +00:00
84bdd51021 Fixed a bug about etag comparison in stats cache. 2016-03-13 09:29:06 +00:00
fbd8959d69 Merge pull request #371 from ggtakec/master
Always set stats cache for opened file
2016-03-13 15:15:30 +09:00
67efc11d94 Always set stats cache for opened file 2016-03-13 05:43:28 +00:00
d6e6eebb95 Merge pull request #364 from ggtakec/master
Checked content-type case-insensitively - #363
2016-02-13 15:08:23 +09:00
4c65c09f4d Checked content-type case-insensitively - #363 2016-02-13 05:58:59 +00:00
b281328ff4 Merge pull request #359 from yurykats/issue358
Issue 358: Remove optional parameter from Content-Type header
2016-02-11 12:19:25 +09:00
e9d2b38726 Merge pull request #360 from RobbKistler/configure
Fix clock_gettime autotools detection on Linux
2016-02-10 00:41:14 +09:00
f4aac111a4 Fix clock_gettime autotools detection on Linux 2016-02-08 13:45:34 -08:00
230991782b Update s3fs_util.cpp 2016-02-08 16:39:56 -05:00
ac99df5c09 Merge pull request #357 from ggtakec/master
Fixed codes about clock_gettime for osx(3)
2016-02-07 17:30:23 +09:00
f81e6103cb Fixed codes about clock_gettime for osx(3) 2016-02-07 08:27:02 +00:00
cd04cb0875 Merge pull request #356 from ggtakec/master
Fixed codes about clock_gettime for osx(2)
2016-02-07 17:14:32 +09:00
0755c6f60c Fixed codes about clock_gettime for osx(2) 2016-02-07 08:10:23 +00:00
1c9d7a9ea9 Merge pull request #355 from ggtakec/master
Fixed codes about clock_gettime for osx
2016-02-07 16:44:20 +09:00
e01ded9e27 Fixed codes about clock_gettime for osx 2016-02-07 07:40:55 +00:00
bf056b213a Merge pull request #354 from ggtakec/master
Supported regex type for additional header format - #343
2016-02-07 15:16:51 +09:00
1af7aaeccb Fixed addhead.cpp for cppcheck 2016-02-07 05:53:56 +00:00
c7cf86c2ef Separated AdditionalHeader class from curl.* 2016-02-07 05:41:56 +00:00
6472eedddc Supported regex type for additional header format. 2016-02-07 05:08:52 +00:00
938554e569 Merge pull request #352 from ggtakec/master
Remove stat file cache dir if specified del_cache - #337
2016-02-07 04:05:45 +09:00
150b83f61e Remove stat file cache dir if specified del_cache - #337 2016-02-06 18:59:13 +00:00
87faed0d04 Merge pull request #351 from ggtakec/master
Check cache directory path and attributes - #347
2016-02-06 22:46:16 +09:00
c5a94cfc0c Check cache directory path and attributes - #347 2016-02-06 13:38:48 +00:00
f548e8ad5e Merge pull request #348 from RobbKistler/pushtests
Integration test summary, continue on error
2016-02-06 19:06:05 +09:00
203df6b58a Merge pull request #346 from RobbKistler/empty-dir
Fix empty directory check against AWS S3
2016-02-06 18:59:45 +09:00
0ac2f7cded Merge pull request #350 from ggtakec/master
Changed cache out logic for stat - #340
2016-02-06 18:37:58 +09:00
b90b51f2c5 Changed cache out logic for stat - #340 2016-02-06 09:09:17 +00:00
8b457133da Merge pull request #341 from hryang/master
Fix the memory leak issue in fdcache.
2016-02-06 14:41:28 +09:00
7bfaa24d25 Integration test summary, continue on error
Details in README.md and s3fs-integration-test-common.sh

Factor out s3fs-fuse and s3proxy start/stop.  The plan is to make it easier to
add test suites besides small-integration-test.sh that can test with various
s3fs options.

Each test run starts in a uniquely named directory at the top of the bucket.
This allows multiple runs against persistent storage without worrying about
cleaning up artifacts left behind by error conditions.

Testing continues if a test case fails.

Results are summarized at the end of the test run

Environment variable to control debug level of s3fs-fuse

Environment variable to enable public bucket (makes it easier to poke
around with tools like curl)

Environment variable to start s3fs-fuse under valgrind

Environment variable that causes the script to set up s3fs-fuse and then
wait indefinitely, making it easy to experiment manually with the mount
point.

Additional test case
2016-02-05 05:40:28 -08:00
4eff6b4dd1 Fix empty directory check against AWS S3
For ListBucketResult on an empty directory, AWS S3 and S3Proxy 1.4
differ.  AWS will match the directory name, S3Proxy does not.

Changing max-keys=1 to max-keys=2 works for both implementations.
append_objects_from_xml() will swallow the directory key.  The log
level of this message is changed from ERROR to DBG.

Fixes #345
2016-02-04 23:13:00 -08:00
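A hedged sketch of the check (hypothetical helper names, not the actual s3fs code): with max-keys=2 the listing can return the directory's own key plus at most one child, so an empty directory is one whose listing contains nothing besides the directory key itself:
```
// Illustrative only: decide emptiness from the keys returned by a
// ListBucket request built with prefix=<dir>/ and max-keys=2.
#include <string>
#include <vector>

std::string build_list_query(const std::string& dir_key)
{
  // dir_key ends with '/'; delimiter keeps the listing shallow.
  return "?delimiter=/&max-keys=2&prefix=" + dir_key;
}

bool is_empty_dir(const std::vector<std::string>& keys, const std::string& dir_key)
{
  for(std::vector<std::string>::size_type i = 0; i < keys.size(); ++i){
    if(keys[i] != dir_key){
      return false;       // a real child exists
    }
  }
  return true;            // nothing but (possibly) the directory key itself
}
```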
e3765ad497 Tune the code indent. 2016-01-28 11:16:06 +08:00
dd9f3aed36 Fix the memory leak issue in fdcache. See issue #340 2016-01-28 11:11:53 +08:00
ccfa13f295 Merge pull request #339 from ggtakec/master
Updated README.md for fstab example.
2016-01-24 14:38:48 +09:00
540c04e6cc Updated README.md for fstab example. 2016-01-24 05:34:28 +00:00
4b40727644 Merge pull request #338 from ggtakec/master
Fixed a bug about IAMCRED type could not be retried.
2016-01-24 14:10:39 +09:00
83937700dd Fixed a bug about IAMCRED type could not be retried. 2016-01-24 05:01:50 +00:00
2c156ceea2 Merge pull request #336 from Jirapong/master
update README.md for fstab
2016-01-22 00:10:02 +09:00
0615338592 update about netfs on boot 2016-01-19 12:06:10 +07:00
b847872622 update README.md for fstab 2016-01-19 11:22:55 +07:00
e932583309 Merge pull request #334 from andrewgaul/bucket-host
Bucket host should include port and not path
2016-01-17 14:46:40 +09:00
7410b7525f Merge pull request #329 from andrewgaul/v4-signature-get
Correct multiple issues with GET and v4 signing
2016-01-17 14:46:19 +09:00
88a4f04217 Bucket host should include port and not path
This resolves issues when using v4 signing with path-style requests.
2016-01-16 15:58:54 -08:00
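As a sketch of the rule (a hypothetical helper, not the s3fs implementation): the value signed as the Host header must be the host plus an optional port, with any path kept out:
```
// Sketch: build the Host value used in v4 signing. Including a path
// here would change the canonical request and break the signature.
#include <sstream>
#include <string>

std::string bucket_host(const std::string& host, int port, bool default_port)
{
  std::ostringstream os;
  os << host;                 // e.g. "s3.amazonaws.com"
  if(!default_port){
    os << ":" << port;        // keep the port, e.g. ":8080"
  }
  return os.str();            // never "host/bucket"
}
```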
ff607e1a2d Correct multiple issues with ListBucketRequest
* provide correct path
* sign query string
* URL encode query string
2016-01-16 10:17:20 -08:00
4bfbfa3621 Merge pull request #331 from andrewgaul/clang
Address various clang warnings
2016-01-16 16:14:50 +09:00
43b91d3235 Merge pull request #330 from andrewgaul/pass-by-reference
Pass by const reference where possible
2016-01-16 16:14:31 +09:00
9fa205f1c3 Merge pull request #328 from andrewgaul/v4-signature-path-request-style
Fix v4 signature with use_path_request_style
2016-01-16 16:14:00 +09:00
e003732f18 Address various clang warnings
Found with:

-Wc++11-extensions
-Wc++11-extra-semi
-Wmissing-variable-declarations
-Wundef
2016-01-11 00:52:24 -08:00
b946b59522 Pass by const reference where possible 2016-01-10 16:58:24 -08:00
ea6b287d1a Fix v4 signature with use_path_request_style
Previously s3fs omitted the bucket name when using path request style
causing SignatureDoesNotMatch with v4 signatures.
2016-01-10 13:41:56 -08:00
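A small sketch of the difference (hypothetical helper): with path-style requests the bucket is part of the URL path, so the v4 canonical URI must include it; with virtual-host style it lives in the Host header instead:
```
// Sketch: canonical URI for v4 signing under the two request styles.
#include <string>

std::string canonical_uri(bool path_style, const std::string& bucket, const std::string& object_path)
{
  if(path_style){
    return "/" + bucket + object_path;   // e.g. "/mybucket/dir/file"
  }
  return object_path;                    // e.g. "/dir/file"
}
```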
a6455ef1bc Merge pull request #323 from andrewgaul/readme
Add goofys to references
2016-01-10 04:26:52 +09:00
8e5e44bfce Add goofys to references 2016-01-07 16:14:11 -08:00
ea151a70c4 Merge pull request #321 from mcellis33/320
320: delete stat cache entry in s3fs_fsync so st_size is refreshed
2015-12-20 15:05:28 +09:00
1e1f2a66de Merge pull request #319 from RobbKistler/clean-exit
Clean up mount point on errors in s3fs_init()
2015-12-20 15:04:54 +09:00
163daa5de1 320: delete stat cache entry in s3fs_fsync so st_size is refreshed 2015-12-18 15:39:25 -08:00
b581290c30 Cleanly exit fuse loop on error in s3fs_init
This allows FUSE to clean the mount point up, preventing
"Transport endpoint not connected" errors on subsequent
access to the mount.
2015-12-15 15:25:56 -08:00
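A minimal sketch of the pattern with libfuse's high-level API (the failure condition is a placeholder):
```
// Sketch: leave the FUSE loop from the init callback on fatal errors so
// FUSE unmounts cleanly instead of leaving a dead mount point that
// reports "Transport endpoint is not connected".
#define FUSE_USE_VERSION 26
#include <fuse.h>
#include <stddef.h>

static void* my_init(struct fuse_conn_info* conn)
{
  int startup_failed = 0;   // placeholder for a real startup check
  if(startup_failed){
    fuse_exit(fuse_get_context()->fuse);
  }
  return NULL;
}
```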
1927ccfe0a Don't loop on fusermount if mountpoint is gone 2015-12-15 15:07:00 -08:00
8162d4925d Merge pull request #313 from mcellis33/gitignore
fix gitignore
2015-12-08 00:04:52 +09:00
2b3ece467b Merge pull request #311 from RobbKistler/dbg-message
Change error log to debug log in s3fs_read()
2015-12-08 00:03:48 +09:00
c2f9b38a95 fix gitignore 2015-12-04 15:21:32 -08:00
8e688816d4 Change error log to debug log in s3fs_read() 2015-12-03 21:25:27 -08:00
8dbd5a3f65 Merge pull request #310 from ggtakec/master
Update integration-test-main.sh as additional change for #300
2015-12-03 22:49:55 +09:00
4bd5ffb0fa Update integration-test-main.sh as additional change for #300 2015-12-03 13:44:43 +00:00
7b2e963636 Merge pull request #300 from bazeli/patch-1
Update integration-test-main.sh
(but it does not work now, I will fix it as soon as possible.)
2015-12-03 22:36:45 +09:00
87d04acb2f Merge pull request #309 from ggtakec/master
Check pthread portability in configure as additional change for #307
2015-12-03 16:58:23 +09:00
759b44135a Check pthread portability in configure as additional change for #307 2015-12-03 07:47:17 +00:00
8b53e0d931 Merge pull request #307 from rockuw/master
Fix pthread portability problem
2015-12-03 16:35:30 +09:00
7db23f9d03 Merge pull request #308 from ggtakec/master
Changed ensure free disk space as additional change for #306
2015-12-03 14:49:40 +09:00
3e655bad3b PTHREAD_MUTEX_RECURSIVE_NP is an enum, not a macro 2015-12-03 13:44:11 +08:00
5e97cb0f48 Changed ensure free disk space as additional change for #306 2015-12-03 05:40:26 +00:00
ef90e0deed Merge pull request #306 from guymguym/patch-1
Fix read concurrency to work in parallel count
2015-12-03 14:26:40 +09:00
f44b61c403 Fix pthread portability problem 2015-12-03 10:44:38 +08:00
Guy
6067af6ef1 Fix read concurrency to work in parallel count
When the prefetch size is limited to the multipart size, the entire parallel logic of the read flow does not have an opportunity to use parallel get.
This fix increases the read performance significantly over our own s3 on-premise solution.
2015-11-30 18:38:15 +02:00
d7a4fc2927 Merge pull request #304 from ggtakec/master
Fixed a bug about mtime - #299
2015-11-30 01:53:13 +09:00
7b62de80f6 Fixed a bug about mtime - #299 2015-11-29 15:53:53 +00:00
8ffff5ba96 Merge remote-tracking branch 'upstream/macosx' 2015-11-29 15:47:47 +00:00
e804441234 Added FAQ wiki page link in README.md 2015-11-26 21:31:03 +09:00
9cc0fd2240 Merge pull request #302 from RobbKistler/syslog-level
Fix syslog level used by S3FS_PRN_EXIT()
2015-11-26 20:35:32 +09:00
fff2952d5f Fix syslog level used by S3FS_PRN_EXIT()
Without converting from s3fs log levels to syslog levels, the syslog
ends up being LOG_EMERG which can cause a broadcast message to all
users.
2015-11-25 13:53:08 -08:00
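A sketch of such a mapping (the enum here is hypothetical; s3fs's actual levels live elsewhere in the source):
```
// Sketch: translate an application log level to a syslog priority.
// Passing the raw application value through would yield LOG_EMERG (0),
// which may broadcast a message to every logged-in user.
#include <syslog.h>

enum s3fs_log_level { S3FS_LOG_CRIT, S3FS_LOG_ERR, S3FS_LOG_WARN, S3FS_LOG_INFO, S3FS_LOG_DBG };

static int to_syslog_priority(enum s3fs_log_level level)
{
  switch(level){
    case S3FS_LOG_CRIT: return LOG_CRIT;
    case S3FS_LOG_ERR:  return LOG_ERR;
    case S3FS_LOG_WARN: return LOG_WARNING;
    case S3FS_LOG_INFO: return LOG_INFO;
    default:            return LOG_DEBUG;
  }
}
```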
b85bd53336 Update integration-test-main.sh
new test for mtime preservation when copying a file with `cp -p`
2015-11-24 17:29:54 +09:00
e1de134d94 Merge branch 'master' into macosx 2015-11-08 06:06:05 +00:00
5af6d4bd82 Merge pull request #295 from ggtakec/fixissue
File opened with O_TRUNC is not flushed - changed #291
2015-11-08 14:14:42 +09:00
c673d9d935 File opened with O_TRUNC is not flushed - changed #291 2015-11-08 04:55:17 +00:00
0fdda61fb5 Merge pull request #293 from SnakeHunt2012/master
Fix a small spelling issue.
2015-11-08 13:26:01 +09:00
331b8456a0 Merge pull request #291 from RobbKistler/truncate
Issue #290: File opened with O_TRUNC is not flushed
2015-11-08 13:23:06 +09:00
63b6f3635b Merge pull request #289 from RobbKistler/log-source-file
Print source file in log messages
2015-11-08 13:14:33 +09:00
c04bcce206 Fix a small spelling issue. 2015-11-06 16:49:37 +08:00
dd7d9268f2 Force flush in s3fs_open() if file is truncated. 2015-11-03 22:06:25 -08:00
a3ef5c820d Add file truncate test
This test creates a file with contents, truncates it to
zero and verifies that it is zero length.
2015-11-03 21:47:15 -08:00
e4da5c59b6 Print source file in log messages 2015-11-03 08:34:02 -08:00
ad2a406205 Merge pull request #288 from ggtakec/master
Fixed a bug about head request(copy) for SSE - issue#286
2015-11-01 23:10:04 +09:00
001206f7c1 Fixed a bug about head request(copy) for SSE - issue#286 2015-11-01 14:05:47 +00:00
2ef7f497f6 Fixed a bug about head request(copy) for SSE - issue#286 2015-11-01 13:54:47 +00:00
497b108109 Merge pull request #285 from andrewgaul/symlink-test
Add test for symlink
2015-11-01 18:46:06 +09:00
86f95b05bf Add test for symlink 2015-10-24 14:20:26 -07:00
70db77af38 Merge pull request #280 from ggtakec/master
Supported an object which is larger than free disk space
2015-10-21 00:53:07 +09:00
8dd234dd8f Fixed bugs about cppcheck error 2015-10-20 15:47:07 +00:00
83d46ef8c6 Fixed bugs about an object larger than free disk space 2015-10-20 15:19:04 +00:00
1b323a6252 Changed debug option to dbglevel in test script. 2015-10-18 17:31:31 +00:00
d102eb752d Supported an object which is larger than free disk space 2015-10-18 17:03:41 +00:00
4252fab685 Merge pull request #248 from andrewgaul/travis-docker
Enable integration tests for Travis
2015-10-19 00:40:28 +09:00
94e3dbb2dc Enable integration tests for Travis
http://blog.travis-ci.com/2015-10-14-opening-up-ubuntu-trusty-beta/
2015-10-14 15:57:15 -07:00
8f115078cd Merge pull request #278 from ggtakec/master
Supported for SSE KMS(#270)
2015-10-07 00:01:38 +09:00
f51ad1f33e Supported for SSE KMS 2015-10-06 14:46:14 +00:00
e29069b8dc Merge pull request #275 from ggtakec/master
Changed and cleaned the logic for debug message.
2015-10-01 05:01:39 +09:00
92e52dadd4 Changed and cleaned the logic for debug message. 2015-09-30 19:41:27 +00:00
a4b00897c1 Merge pull request #274 from ggtakec/master
Modified man page for storage_class option(#271)
2015-09-28 22:52:59 +09:00
f1b7f5ea95 Modified man page for storage_class option(#271) 2015-09-28 13:47:39 +00:00
6a9082f126 Merge pull request #271 from andrewgaul/storage-class
Add support for standard_ia storage class
2015-09-28 22:18:32 +09:00
48f0a6f811 Merge pull request #268 from RobbKistler/loopback
Use 127.0.0.1 not localhost in s3proxy wait loop
2015-09-28 22:12:29 +09:00
1b39b2d450 Merge pull request #267 from nickstinger/master
Added the _netdev option to the fstab example.
2015-09-28 22:11:00 +09:00
785ed642ba Add support for standard_ia storage class
This enables storage with lower at-rest prices, higher request prices,
and lower availability.  Also rework existing reduced redundancy
parsing into a more generic storage class.  More background on
standard_ia:

https://aws.amazon.com/blogs/aws/aws-storage-update-new-lower-cost-s3-storage-option-glacier-price-reduction/
2015-09-17 13:35:25 -07:00
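A hedged sketch of the generic form (not the actual patch): the storage class becomes a request header value rather than a boolean reduced-redundancy flag:
```
// Sketch: attach the requested storage class to an upload request.
// "x-amz-storage-class" is the real S3 header; the map type is
// illustrative.
#include <map>
#include <string>

typedef std::map<std::string, std::string> headers_t;

void set_storage_class(headers_t& headers, const std::string& storage_class)
{
  // e.g. "STANDARD", "STANDARD_IA" or "REDUCED_REDUNDANCY"
  if(storage_class != "STANDARD"){
    headers["x-amz-storage-class"] = storage_class;
  }
}
```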
3d5b8a7672 Use 127.0.0.1 not localhost in s3proxy wait loop
localhost doesn't always resolve to 127.0.0.1
2015-09-16 00:06:41 -07:00
0aef0cf765 Added the _netdev option to the fstab example.
Although the network device option (_netdev) may not work everywhere, it likely does no harm on systems where it's not supported. Adding the option to the example informs users that the mount must happen after network activation, and how that might be accomplished.
2015-09-15 10:34:15 +09:00
489f9edec7 Merge pull request #266 from RobbKistler/fix-integration-test
Cleanup from PR #265
2015-09-13 16:44:41 +09:00
718db57ade Code review changes
Missed some cleanup from the code review
2015-09-13 00:31:56 -07:00
639dcf19b0 Merge pull request #265 from RobbKistler/fix-integration-test
Fix integration tests
2015-09-13 15:59:36 +09:00
53bc960224 Merge pull request #263 from RobbKistler/aws
Allow integration testing against Amazon S3
2015-09-13 15:55:28 +09:00
ead346c6d3 Merge pull request #261 from andrewgaul/help-timeouts
Correct help timeouts
2015-09-13 15:49:15 +09:00
375059d9f8 Merge pull request #260 from andrewgaul/help-wrap
Wrap help text at 80 characters
2015-09-13 15:48:28 +09:00
6b21d9d424 Code review changes 2015-09-11 16:09:00 -07:00
dac9844765 Fix remove_nonempty_directory test bug
Wrap the attempt to rmdir in an if statement, otherwise the entire
test process exits (errexit is set). This test expects the rmdir
to fail.
2015-09-11 15:24:17 -07:00
849e66f6a1 Change test_append_file to avoid object read-after-overwrite
Open the test file once outside of the tests for loop.  This helps avoid
object consistency problems when running against S3 providers without
strong consistency (like Amazon).  See Issue #263.
2015-09-11 15:24:17 -07:00
6a8a2e4800 Allow integration testing against Amazon S3
Example command line:
S3FS_CREDENTIALS_FILE=keyfile \
TEST_BUCKET_1=somebucket \
S3PROXY_BINARY="" \
URL="http://s3.amazonaws.com" ./small-integration-test.sh
2015-09-11 14:35:12 -07:00
0358908910 Correct help timeouts
Follow-on to #167.
2015-09-10 11:45:05 -07:00
32ce1a7267 Wrap help text at 80 characters 2015-09-10 11:43:09 -07:00
9ea8da839c Merge pull request #258 from juandiegogonzales/patch-1
Update README.md to better explain mount upon boot
2015-09-09 00:15:55 +09:00
39cec488d2 Merge pull request #257 from jesselsteele/patch-1
Update README.md: Bugfix password file permissions errors
2015-09-09 00:15:21 +09:00
96436df18d Merge pull request #256 from andrewgaul/readme
Add no atomic rename to limitations
2015-09-09 00:14:47 +09:00
3aabb5616c Update README.md to better explain mount upon boot
As a novice Linux user, I didn't know I had to add a line to /etc/fstab for automatic mounting at boot. It took me a few minutes to research and figure out the right process (at first, I even entered s3fs#mybucket into the command line).

This change will (hopefully) save time to unseasoned users.
2015-09-07 10:05:02 -05:00
8e55f45818 Update README.md
received "should not have others permissions" when mounting
did chmod 640 and received "should not have group permissions"
used chmod 600 after creating the password file and had no problems
2015-09-05 15:08:07 +08:00
ec4135c9ed Add no atomic rename to limitations 2015-09-01 13:10:21 -07:00
cfdfecb4d1 Merge pull request #253 from s3fs-fuse/fixmkdirp
Added checking cache dir perms at startup.
2015-08-23 13:18:50 +09:00
97b8b34aab Added checking cache dir perms at startup(2). 2015-08-23 04:14:57 +00:00
ce66430fac Added checking cache dir perms at startup. 2015-08-23 03:57:34 +00:00
1fc56e6665 Merge pull request #252 from Ziggeo/fix-create-cache-directories
This fixes an issue with caching when the creation of a subdirectory …
2015-08-23 10:48:08 +09:00
d7d96907cf This fixes an issue with caching when the creation of a subdirectory within the cache is aborted because a common cached parent directory already exists. 2015-08-21 19:30:04 -04:00
eb97054f49 Merge pull request #251 from flandr/skip-xattr-tests
Skip xattr tests if utilities are missing
2015-08-22 02:11:02 +09:00
7280ca6a69 Skip xattr tests if utilities are missing 2015-08-21 10:05:14 -07:00
30b2a833a8 Merge pull request #250 from s3fs-fuse/issue#228
s3fs can print version with short commit hash - #228
2015-08-22 01:35:07 +09:00
8f8e52b91a s3fs can print version with short commit hash(2) - #228 2015-08-21 16:30:24 +00:00
751c868769 s3fs can print version with short commit hash - #228 2015-08-21 16:19:31 +00:00
c3a47c26ec Merge pull request #249 from andrewgaul/wget-quiet
Silence wget
2015-08-22 00:51:30 +09:00
632578f328 Merge pull request #247 from andrewgaul/base64
Base64 cleanup
2015-08-22 00:45:26 +09:00
5a4240b18d Merge pull request #246 from andrewgaul/coverity
Unlock during early return in TruncateCache
2015-08-22 00:43:34 +09:00
236aeb9dfd Silence wget 2015-08-20 11:38:27 -07:00
bcfadbe1a8 Unlock during early return in TruncateCache
Found via Coverity.  Regression from
dfa63345ed.
2015-08-19 13:54:14 -07:00
b5c027f15d Add unit tests for base64 encoding and decoding 2015-08-19 13:49:10 -07:00
15db80b459 NUL terminate decoded base64 string
For consistency with encoded strings.
2015-08-19 13:48:07 -07:00
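A sketch of the convention (illustrative, not the s3fs decoder): the buffer gets one extra byte so string-minded callers are safe, while the true length is still reported separately because decoded data may itself contain NUL bytes:
```
// Sketch: copy-out with a trailing NUL that is not counted in *out_len.
#include <stdlib.h>
#include <string.h>

unsigned char* with_nul(const unsigned char* data, size_t len, size_t* out_len)
{
  unsigned char* buf = (unsigned char*)malloc(len + 1);
  if(!buf) return NULL;
  memcpy(buf, data, len);
  buf[len] = '\0';     // terminator for callers that expect a C string
  *out_len = len;      // real payload length, excluding the terminator
  return buf;
}
```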
76c0ef86e4 Move base64 and hex functions to string_util 2015-08-19 13:47:26 -07:00
a3e820e733 Merge pull request #245 from andrewgaul/map-duplicate-lookups
Elide duplicate lookups of std::map via iterators
2015-08-20 01:22:06 +09:00
a3568a1419 Merge pull request #243 from andrewgaul/cppcheck-travis
Run cppcheck during Travis builds
2015-08-20 01:20:15 +09:00
4ad57bdea5 Merge pull request #240 from andrewgaul/md5
Enable Content-MD5 during multipart upload part
2015-08-20 01:19:01 +09:00
085733d7c9 Merge pull request #239 from andrewgaul/google-code
Update stale Google Code reference in --help
2015-08-20 01:08:00 +09:00
fcb58aec3c Merge pull request #238 from andrewgaul/cppcheck
Enable all cppcheck rules
2015-08-20 01:06:50 +09:00
402c609316 Merge pull request #237 from andrewgaul/test-refactor
Refactor tests into individual functions
2015-08-20 01:03:03 +09:00
026a9f2bdc Merge pull request #235 from andrewgaul/complete-mpu-leak
Plug leak during complete multipart upload
2015-08-20 00:40:00 +09:00
1918d6fa2d Merge pull request #234 from andrewgaul/readme
Update README
2015-08-20 00:37:49 +09:00
fd04b9a437 Merge pull request #233 from andrewgaul/remove-inttostr
Remove IntToStr
2015-08-20 00:34:21 +09:00
ea99603b58 Merge pull request #232 from andrewgaul/stat-cache-locking
Always hold stat_cache_lock when using stat_cache
2015-08-20 00:28:57 +09:00
036612dbb0 Merge pull request #231 from andrewgaul/autolock
Rewrite AutoLock
2015-08-20 00:24:05 +09:00
67d1576dfb Elide duplicate lookups of std::map via iterators
Also remove use of C++11 std::map::at.
2015-08-18 14:00:42 -07:00
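The idiom, as a small sketch: a single find() replaces a count()/operator[] (or at()) pair, walking the tree once and avoiding the C++11-only std::map::at:
```
// Sketch: one lookup instead of two on a std::map.
#include <map>
#include <string>

typedef std::map<std::string, int> counter_map_t;

int lookup(const counter_map_t& m, const std::string& key, int fallback)
{
  counter_map_t::const_iterator it = m.find(key);   // single tree walk
  return (it != m.end()) ? it->second : fallback;
}
```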
2850fe731b Run cppcheck during Travis builds 2015-08-18 03:01:14 -07:00
a157ac59ca Enable Content-MD5 during multipart upload part
This allows retries of multi-part uploads instead of discovering a
fatal error during complete multipart upload.  Also enable Content-MD5
for integration tests and refactor hexadecimal code.
2015-08-18 02:54:00 -07:00
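A sketch of the header value, assuming OpenSSL (the helper name is hypothetical): Content-MD5 is the base64 of the raw 16-byte MD5 digest of the part body, so S3 can reject a corrupted part immediately and the client can retry just that part:
```
// Sketch: compute a Content-MD5 value for one upload part.
#include <openssl/evp.h>
#include <openssl/md5.h>
#include <string>

std::string content_md5(const unsigned char* part, size_t len)
{
  unsigned char digest[MD5_DIGEST_LENGTH];
  MD5(part, len, digest);                           // raw 16-byte digest
  unsigned char b64[((MD5_DIGEST_LENGTH + 2) / 3) * 4 + 1];
  EVP_EncodeBlock(b64, digest, MD5_DIGEST_LENGTH);  // base64, NUL-terminated
  return std::string(reinterpret_cast<char*>(b64));
}
```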
20f425fe15 Update README
This better explains many of the features and limitations and removes
stale information.
2015-08-17 07:48:11 -07:00
32520fd1fb Update stale Google Code reference in --help 2015-08-16 23:30:41 -07:00
c0b21d8808 Enable all cppcheck rules 2015-08-16 17:13:24 -07:00
17d223b542 Refactor tests into individual functions 2015-08-16 15:50:17 -07:00
9c5bf0bb66 Plug leak during complete multipart upload 2015-08-15 22:38:24 -07:00
dfa63345ed Always hold stat_cache_lock when using stat_cache
We could further improve this code by holding stat_cache_lock before
calls to DelStat instead of unlocking then relocking it.
2015-08-14 20:14:12 -07:00
3f59b8da01 Rewrite AutoLock
Previously AutoLock::Lock allowed subsequent callers to proceed
without the lock.  Further is_locked was not always protected by
auto_mutex.  Finally AutoLock eagerly released auto_mutex when
recursively unlocking.  s3fs does not need recursive locks so we
rewrite and simplify AutoLock.  Partially surfaced by Coverity.
2015-08-14 20:00:56 -07:00
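The simplified shape, as a sketch (a generic RAII guard, not the s3fs class itself): the constructor always takes the lock and the destructor always releases it, leaving no recursive or conditionally-locked states to reason about:
```
// Sketch: scope-bound pthread mutex guard.
#include <pthread.h>

class ScopedLock
{
 public:
  explicit ScopedLock(pthread_mutex_t* mutex) : mutex_(mutex)
  {
    pthread_mutex_lock(mutex_);     // taken unconditionally
  }
  ~ScopedLock()
  {
    pthread_mutex_unlock(mutex_);   // released exactly once, on scope exit
  }
 private:
  ScopedLock(const ScopedLock&);              // non-copyable
  ScopedLock& operator=(const ScopedLock&);
  pthread_mutex_t* mutex_;
};
```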
0ea88a73c7 Remove IntToStr
str duplicates this functionality.  Also add unit test.
2015-08-12 08:25:09 -07:00
2e344bb48f Merge pull request #229 from andrewgaul/test-rename-before-close
Convert rename_before_close to a shell script
2015-08-13 00:10:07 +09:00
c91a645782 Convert rename_before_close to a shell script #229 2015-08-12 15:09:34 +00:00
96f63a17c0 Merge pull request #224 from andrewgaul/cppcheck
Configure cppcheck
2015-08-13 00:05:31 +09:00
756d1e5e81 Configure cppcheck #224 2015-08-12 15:04:16 +00:00
2482aada43 Merge pull request #222 from andrewgaul/explicit
Annotate constructors as explicit
2015-08-12 23:42:03 +09:00
64146f69a4 Merge pull request #221 from andrewgaul/compare
Compare idiomatically
2015-08-12 23:41:24 +09:00
edb3c78fe9 Merge pull request #220 from andrewgaul/test-rmdir-nonempty-directory
Test removing a non-empty directory
2015-08-12 23:41:08 +09:00
49e32967ec Merge pull request #219 from andrewgaul/coverity
Address Coverity errors
2015-08-12 23:40:47 +09:00
5655cffd32 Merge pull request #217 from jelly/master
Override install, so that the make install does not install rename_before_close under /test
2015-08-12 23:40:18 +09:00
09dff484e1 Merge pull request #215 from RobbKistler/memleak
Fix mem leak in openssl_auth.cpp:s3fs_sha256hexsum
2015-08-12 23:38:51 +09:00
deb0e9eec3 Merge pull request #213 from andrewgaul/rename-large-files
Parse ETag from copy multipart correctly
2015-08-12 23:38:22 +09:00
5d1c8a7eda Convert rename_before_close to a shell script 2015-08-11 20:51:18 -07:00
ff8a0c2eea Parse ETag from copy multipart correctly
Previously s3fs misparsed this, preventing renames of files larger
than 5 GB.  Integration test disabled until S3Proxy 1.5.0 is released.
2015-08-11 14:43:35 -07:00
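For context, a hedged sketch (string scanning only, not the actual fix): UploadPartCopy returns the part's ETag in a <CopyPartResult> XML body rather than in an ETag response header, so it has to be pulled out of the XML:
```
// Sketch: extract <ETag>...</ETag> from a CopyPartResult response body.
#include <string>

bool etag_from_copy_part_result(const std::string& xml, std::string& etag)
{
  const std::string open = "<ETag>", close = "</ETag>";
  std::string::size_type s = xml.find(open);
  if(s == std::string::npos) return false;
  s += open.size();
  std::string::size_type e = xml.find(close, s);
  if(e == std::string::npos) return false;
  etag = xml.substr(s, e - s);   // typically a quoted MD5 hex string
  return true;
}
```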
cbf7777f41 Configure cppcheck 2015-08-08 05:18:51 -07:00
fcb55c2109 Fix mem leaks in openssl_auth.cpp, nss_auth.cpp
Fix memory leaks in openssl_auth.cpp:s3fs_sha256hexsum and
nss_auth.cpp:s3fs_sha256hexsum.  Leaks occur every time a file
is created.
2015-08-06 12:45:40 -07:00
b6fa2deb9f Annotate constructors as explicit
This prevents implicit conversions.
2015-08-05 23:41:53 -07:00
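What the annotation buys, in a short sketch (Buffer is a made-up class):
```
// Sketch: "explicit" turns a silent size_t -> Buffer conversion into a
// compile-time error.
#include <cstddef>

class Buffer
{
 public:
  explicit Buffer(std::size_t size) : size_(size) {}
 private:
  std::size_t size_;
};

void consume(const Buffer&) {}

int main()
{
  // consume(42);       // would not compile: no implicit conversion
  consume(Buffer(42));  // OK: intent is stated
  return 0;
}
```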
801ca0c2d3 Compare idiomatically 2015-08-05 23:35:08 -07:00
5f792a9a2b Test removing a non-empty directory 2015-08-05 23:31:13 -07:00
8ee71caabb Address Coverity errors
Fixed an uninitialized member, misordered NULL check, resource leak,
and unconsumed return value.
2015-08-05 23:28:06 -07:00
ed70f7763a Override install, so that the make install does not install
rename_before_close under /test
2015-08-01 17:15:00 +02:00
730262f000 Merge pull request #212 from s3fs-fuse/master
update content of the master to macosx branch.
2015-07-20 01:38:42 +09:00
40 changed files with 4989 additions and 2433 deletions

.gitignore

@@ -7,6 +7,9 @@
/config.log
/config.status
/config.sub
/stamp-h1
/config.h
/config.h.in
/configure
/depcomp
/test-driver
@@ -24,3 +27,4 @@
/test/Makefile
/test/Makefile.in
/test/*.log
/default_commit_hash

.travis.yml

@@ -1,17 +1,16 @@
language: cpp
sudo: required
dist: trusty
cache: apt
before_install:
- sudo apt-get update -qq
- sudo apt-get install -qq libfuse-dev
- sudo apt-get install -qq cppcheck libfuse-dev openjdk-7-jdk
script:
- ./autogen.sh
- ./configure
- make
- make cppcheck
- make check -C src
# Travis granted s3fs access to their upcoming alpha testing stack which may
# allow us to use FUSE.
# TODO: Travis changed their infrastructure some time in June 2015 such that
# this does not work currently
#- modprobe fuse
#- make check -C test
- modprobe fuse
- make check -C test
- cat test/test-suite.log

ChangeLog

@@ -1,6 +1,109 @@
ChangeLog for S3FS
------------------
Version 1.80 -- May 29, 2016
#213 - Parse ETag from copy multipart correctly
#215 - Fix mem leak in openssl_auth.cpp:s3fs_sha256hexsum
#217 - Override install, so that the make install does not install rename_before_close under /test
#219 - Address Coverity errors
#220 - Test removing a non-empty directory
#221 - Compare idiomatically
#222 - Annotate constructors as explicit
#224 - Configure cppcheck
#229 - Convert rename_before_close to a shell script
#231 - Rewrite AutoLock
#232 - Always hold stat_cache_lock when using stat_cache
#233 - Remove IntToStr
#234 - Update README
#235 - Plug leak during complete multipart upload
#237 - Refactor tests into individual functions
#238 - Enable all cppcheck rules
#239 - Update stale Google Code reference in --help
#240 - Enable Content-MD5 during multipart upload part
#243 - Run cppcheck during Travis builds
#245 - Elide duplicate lookups of std::map via iterators
#246 - Unlock during early return in TruncateCache
#247 - Base64 cleanup
#248 - Enable integration tests for Travis
#249 - Silence wget
#250 - s3fs can print version with short commit hash - #228
#251 - Skip xattr tests if utilities are missing
#252 - This fixes an issue with caching when the creation of a subdirectory …
#253 - Added checking cache dir perms at startup.
#256 - Add no atomic rename to limitations
#257 - Update README.md: Bugfix password file permissions errors
#258 - Update README.md to better explain mount upon boot
#260 - Wrap help text at 80 characters
#261 - Correct help timeouts
#263 - Allow integration testing against Amazon S3
#265 - Fix integration tests
#266 - Cleanup from PR #265
#267 - Added the _netdev option to the fstab example.
#268 - Use 127.0.0.1 not localhost in s3proxy wait loop
#271 - Add support for standard_ia storage class
#274 - Modified man page for storage_class option(#271)
#275 - Changed and cleaned the logic for debug message.
#278 - Supported for SSE KMS(#270)
#280 - Supported an object which is larger than free disk space
#285 - Add test for symlink
#288 - Fixed a bug about head request(copy) for SSE - issue#286
#289 - Print source file in log messages
#291 - File opened with O_TRUNC is not flushed - Issue #290
#293 - Fix a small spelling issue.
#295 - File opened with O_TRUNC is not flushed - changed #291
#300 - Update integration-test-main.sh
#302 - Fix syslog level used by S3FS_PRN_EXIT()
#304 - Fixed a bug about mtime - #299
#306 - Fix read concurrency to work in parallel count
#307 - Fix pthread portability problem
#308 - Changed ensure free disk space as additional change for #306
#309 - Check pthread portability in configure as additional change for #307
#310 - Update integration-test-main.sh as additional change for #300
#311 - Change error log to debug log in s3fs_read()
#313 - fix gitignore
#319 - Clean up mount point on errors in s3fs_init()
#321 - delete stat cache entry in s3fs_fsync so st_size is refreshed - #320
#323 - Add goofys to references
#328 - Fix v4 signature with use_path_request_style
#329 - Correct multiple issues with GET and v4 signing
#330 - Pass by const reference where possible
#331 - Address various clang warnings
#334 - Bucket host should include port and not path
#336 - update README.md for fstab
#338 - Fixed a bug about IAMCRED type could not be retried.
#339 - Updated README.md for fstab example.
#341 - Fix the memory leak issue in fdcache.
#346 - Fix empty directory check against AWS S3
#348 - Integration test summary, continue on error
#350 - Changed cache out logic for stat - #340
#351 - Check cache directory path and attributes - #347
#352 - Remove stat file cache dir if specified del_cache - #337
#354 - Supported regex type for additional header format - #343
#355 - Fixed codes about clock_gettime for osx
#356 - Fixed codes about clock_gettime for osx(2)
#357 - Fixed codes about clock_gettime for osx(3)
#359 - Remove optional parameter from Content-Type header - #358
#360 - Fix clock_gettime autotools detection on Linux
#364 - Checked content-type case-insensitively - #363
#371 - Always set stats cache for opened file
#372 - Fixed a bug about etag comparison in stats cache, etc.
#376 - Test for writing after an lseek past end of file
#379 - Fixed a bug about writing sparse file - #375
#385 - fix typo in curl.cpp: s/returing/returning/
#391 - Update s3fs.1
#394 - Revert "Fixed a bug about writing sparse file - #375"
#395 - Fixed writing sparse file - #375,#379,#394
#397 - Supported User-Agent header - #383
#403 - Fix a bug when truncating an empty file
#404 - Add curl handler pool to reuse connections
#409 - Fixed 'load_sse_c' option not working - #388
#410 - Allow duplicate key in ahbe_conf - #386
#411 - Loading IAM role name automatically (iam_role option) - #387
#415 - Fixed a bug about stat_cache_expire - #382
#420 - Skip early credential checks when iam_role=auto
#422 - Fixes for iam_role=auto
#424 - Added travis CI badge in README.md
Version 1.79 -- Jul 19, 2015
issue #60 - Emit user-friendly log messages on failed CheckBucket requests
issue #62 - Remove stray chars from source files

Makefile.am

@@ -19,7 +19,7 @@
######################################################################
SUBDIRS=src test doc
EXTRA_DIST=doc
EXTRA_DIST=doc default_commit_hash
dist-hook:
rm -rf `find $(distdir)/doc -type d -name .svn`
@@ -28,3 +28,12 @@ dist-hook:
release : dist ../utils/release.sh
../utils/release.sh $(DIST_ARCHIVES)
cppcheck:
cppcheck --quiet --error-exitcode=1 \
-U CURLE_PEER_FAILED_VERIFICATION \
--enable=all \
--suppress=missingIncludeSystem \
--suppress=unsignedLessThanZero \
--suppress=unusedFunction \
--suppress=variableScope \
src/ test/

NEWS

README

@@ -1,67 +0,0 @@
THIS README CONTAINS OUTDATED INFORMATION - please refer to the wiki or --help
S3FS-Fuse
S3FS is a FUSE (File System in User Space) based solution to mount/unmount Amazon S3 storage buckets and use system commands with S3 just as if it were another hard disk.
In order to compile s3fs, you'll need the following requirements:
* Kernel-devel packages (or kernel source) installed that are the SAME version as your running kernel
* LibXML2-devel packages
* CURL-devel packages (or compile curl from sources at: curl.haxx.se/ use 7.15.X)
* GCC, GCC-C++
* pkgconfig
* FUSE (>= 2.8.4)
* FUSE Kernel module installed and running (RHEL 4.x/CentOS 4.x users - read below)
* OpenSSL-devel (0.9.8)
GnuTLS(gcrypt and nettle)
NSS
* Git
If you're using YUM or APT to install those packages, they might require additional packages; allow them to be installed.
Downloading & Compiling:
------------------------
In order to download s3fs, download from following url:
https://github.com/s3fs-fuse/s3fs-fuse/archive/master.zip
Or clone via the following command:
git clone git://github.com/s3fs-fuse/s3fs-fuse.git
Go inside the directory that has been created (s3fs-fuse) and run: ./autogen.sh
This will generate a number of scripts in the project directory, including a configure script which you should run with: ./configure
If configure succeeded, you can now run: make. If it didn't, make sure you meet the dependencies above.
This should compile the code. If everything goes OK, you'll be greeted with "ok!" at the end and you'll have a binary file called "s3fs"
in the src/ directory.
As root (you can use su, su -, sudo) do: "make install" - this will copy the "s3fs" binary to /usr/local/bin.
Congratulations. S3fs is now compiled and installed.
Usage:
------
In order to use s3fs, make sure you have the Access Key and the Secret Key handy. (refer to the wiki)
First, create a directory where to mount the S3 bucket you want to use.
Example (as root): mkdir -p /mnt/s3
Then run: s3fs mybucket[:path] /mnt/s3
This will mount your bucket to /mnt/s3. You can do a simple "ls -l /mnt/s3" to see the content of your bucket.
If you want to allow other people to access the same bucket on the same machine, you can add "-o allow_other" to read/write/delete content of the bucket.
You can add a fixed mount point in /etc/fstab, here's an example:
s3fs#mybucket /mnt/s3 fuse allow_other 0 0
This will mount upon reboot (or by launching: mount -a) your bucket on your machine.
If that does not work, you should probably specify the "_netdev" option in fstab.
All other options can be read at: https://github.com/s3fs-fuse/s3fs-fuse/wiki/Fuse-Over-Amazon
Known Issues:
-------------
s3fs should work fine with S3 storage. However, there are a couple of limitations:
* Currently s3fs could hang the CPU if you have lots of time-outs. This is *NOT* a fault of s3fs but rather of libcurl. This happens when you try to copy thousands of files in one session; it doesn't happen when you upload hundreds of files or less.
* CentOS 4.x/RHEL 4.x users - if you use the kernel that shipped with your distribution and didn't upgrade to the latest kernel RedHat/CentOS provides, you might have a problem loading the "fuse" kernel module. Please upgrade to the latest kernel (2.6.16 or above) and make sure the "fuse" kernel module is compiled and loadable, since FUSE requires this kernel module and s3fs requires it as well.
* Moving/renaming/erasing files takes time since the whole file needs to be accessed first. A workaround could be to use s3fs's cache support with the use_cache option.

README.md

@@ -0,0 +1,128 @@
s3fs
====
s3fs allows Linux and Mac OS X to mount an S3 bucket via FUSE.
s3fs preserves the native object format for files, allowing use of other tools like [s3cmd](http://s3tools.org/s3cmd).
[![Build Status](https://travis-ci.org/s3fs-fuse/s3fs-fuse.svg?branch=master)](https://travis-ci.org/s3fs-fuse/s3fs-fuse)
Features
--------
* large subset of POSIX including reading/writing files, directories, symlinks, mode, uid/gid, and extended attributes
* compatible with Amazon S3, Google Cloud Storage, and other S3-based object stores
* large files via multi-part upload
* renames via server-side copy
* optional server-side encryption
* data integrity via MD5 hashes
* in-memory metadata caching
* local disk data caching
* user-specified regions, including Amazon GovCloud
* authenticate via v2 or v4 signatures
Installation
------------
Ensure you have all the dependencies:
On Ubuntu 14.04:
```
sudo apt-get install automake autotools-dev g++ git libcurl4-gnutls-dev libfuse-dev libssl-dev libxml2-dev make pkg-config
```
On CentOS 7:
```
sudo yum install automake fuse-devel gcc-c++ git libcurl-devel libxml2-devel make openssl-devel
```
Compile from master via the following commands:
```
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
cd s3fs-fuse
./autogen.sh
./configure
make
sudo make install
```
Examples
--------
Enter your S3 identity and credential in a file `/path/to/passwd`:
```
echo MYIDENTITY:MYCREDENTIAL > /path/to/passwd
```
Make sure the file `/path/to/passwd` has proper permissions (if you get a 'permissions' error when mounting):
```
chmod 600 /path/to/passwd
```
Run s3fs with an existing bucket `mybucket` and directory `/path/to/mountpoint`:
```
s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd
```
If you encounter any errors, enable debug output:
```
s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd -d -d -f -o f2 -o curldbg
```
You can also mount on boot by entering the following line to `/etc/fstab`:
```
s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other 0 0
or
mybucket /path/to/mountpoint fuse.s3fs _netdev,allow_other 0 0
```
Note: You may also want to create the global credential file first
```
echo MYIDENTITY:MYCREDENTIAL > /etc/passwd-s3fs
chmod 600 /etc/passwd-s3fs
```
Note2: You may also need to make sure the `netfs` service is started on boot
Limitations
-----------
Generally S3 cannot offer the same performance or semantics as a local file system. More specifically:
* random writes or appends to files require rewriting the entire file
* metadata operations such as listing directories have poor performance due to network latency
* [eventual consistency](https://en.wikipedia.org/wiki/Eventual_consistency) can temporarily yield stale data
* no atomic renames of files or directories
* no coordination between multiple clients mounting the same bucket
* no hard links
References
----------
* [goofys](https://github.com/kahing/goofys) - similar to s3fs but has better performance and less POSIX compatibility
* [s3backer](https://github.com/archiecobbs/s3backer) - mount an S3 bucket as a single file
* [s3fs-python](https://fedorahosted.org/s3fs/) - an older and less complete implementation written in Python
* [S3Proxy](https://github.com/andrewgaul/s3proxy) - combine with s3fs to mount EMC Atmos, Microsoft Azure, and OpenStack Swift buckets
* [s3ql](https://bitbucket.org/nikratio/s3ql/) - similar to s3fs but uses its own object format
* [YAS3FS](https://github.com/danilop/yas3fs) - similar to s3fs but uses SNS to allow multiple clients to mount a bucket
Frequently Asked Questions
--------------------------
* [FAQ wiki page](https://github.com/s3fs-fuse/s3fs-fuse/wiki/FAQ)
License
-------
Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>
Licensed under the GNU GPL version 2

autogen.sh

@@ -19,6 +19,28 @@
#
# See the file ChangeLog for a revision history.
echo "--- Make commit hash file -------"
SHORTHASH="unknown"
type git > /dev/null 2>&1
if [ $? -eq 0 -a -d .git ]; then
RESULT=`git rev-parse --short HEAD`
if [ $? -eq 0 ]; then
SHORTHASH=${RESULT}
fi
fi
echo ${SHORTHASH} > default_commit_hash
echo "--- Finished commit hash file ---"
echo "--- Start autotools -------------"
aclocal \
&& autoheader \
&& automake --add-missing \
&& autoconf
echo "--- Finished autotools ----------"
exit 0

configure.ac

@@ -20,16 +20,20 @@
dnl Process this file with autoconf to produce a configure script.
AC_PREREQ(2.59)
AC_INIT(s3fs, 1.79)
AC_INIT(s3fs, 1.80)
AC_CONFIG_HEADER([config.h])
AC_CANONICAL_SYSTEM
AM_INIT_AUTOMAKE()
AM_INIT_AUTOMAKE([foreign])
AC_PROG_CXX
AC_PROG_CC
CXXFLAGS="$CXXFLAGS -Wall -D_FILE_OFFSET_BITS=64"
dnl ----------------------------------------------
dnl For OSX
dnl ----------------------------------------------
case "$target" in
*-darwin* )
# Do something specific for mac
@@ -219,12 +223,67 @@ AM_CONDITIONAL([USE_GNUTLS_NETTLE], [test "$auth_lib" = nettle])
AM_CONDITIONAL([USE_SSL_NSS], [test "$auth_lib" = nss])
dnl ----------------------------------------------
dnl end of ssl library
dnl check functions
dnl ----------------------------------------------
dnl malloc_trim function
AC_CHECK_FUNCS(malloc_trim, , )
AC_CHECK_FUNCS([malloc_trim])
dnl clock_gettime function(osx)
AC_SEARCH_LIBS([clock_gettime],[rt posix4])
AC_CHECK_FUNCS([clock_gettime])
dnl ----------------------------------------------
dnl check symbols/macros/enums
dnl ----------------------------------------------
dnl PTHREAD_MUTEX_RECURSIVE
AC_MSG_CHECKING([pthread mutex recursive])
AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM([[#include <pthread.h>]],
[[int i = PTHREAD_MUTEX_RECURSIVE;]])
],
[AC_DEFINE(S3FS_MUTEX_RECURSIVE, PTHREAD_MUTEX_RECURSIVE, [Define if you have PTHREAD_MUTEX_RECURSIVE])
AC_MSG_RESULT(PTHREAD_MUTEX_RECURSIVE)
],
[AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM([[#include <pthread.h>]],
[[int i = PTHREAD_MUTEX_RECURSIVE_NP;]])
],
[AC_DEFINE(S3FS_MUTEX_RECURSIVE, PTHREAD_MUTEX_RECURSIVE_NP, [Define if you have PTHREAD_MUTEX_RECURSIVE_NP])
AC_MSG_RESULT(PTHREAD_MUTEX_RECURSIVE_NP)
],
[AC_MSG_ERROR([do not have PTHREAD_MUTEX_RECURSIVE symbol])])
]
)
dnl ----------------------------------------------
dnl output files
dnl ----------------------------------------------
AC_CONFIG_FILES(Makefile src/Makefile test/Makefile doc/Makefile)
dnl ----------------------------------------------
dnl short commit hash
dnl ----------------------------------------------
AC_CHECK_PROG([GITCMD], [git --version], [yes], [no])
AC_CHECK_FILE([.git], [DOTGITDIR=yes], [DOTGITDIR=no])
AC_MSG_CHECKING([github short commit hash])
if test "x${GITCMD}" = "xyes" -a "x${DOTGITDIR}" = "xyes"; then
GITCOMMITHASH=`git rev-parse --short HEAD`
elif test -f default_commit_hash; then
GITCOMMITHASH=`cat default_commit_hash`
else
GITCOMMITHASH="unknown"
fi
AC_MSG_RESULT([${GITCOMMITHASH}])
AC_DEFINE_UNQUOTED([COMMIT_HASH_VAL], ["${GITCOMMITHASH}"], [short commit hash value on github])
dnl ----------------------------------------------
dnl put
dnl ----------------------------------------------
AC_OUTPUT
dnl ----------------------------------------------
dnl end configuration
dnl ----------------------------------------------

doc/man/s3fs.1

@@ -62,19 +62,41 @@ local folder to use for local file cache.
\fB\-o\fR del_cache - delete local file cache
delete local file cache when s3fs starts and exits.
.TP
\fB\-o\fR storage_class (default is standard)
store object with specified storage class.
this option replaces the old option use_rrs.
Possible values: standard, standard_ia, and reduced_redundancy.
.TP
\fB\-o\fR use_rrs (default is disable)
use Amazon's Reduced Redundancy Storage.
this option can not be specified with use_sse.
(can specify use_rrs=1 for old version)
this option has been replaced by new storage_class option.
.TP
\fB\-o\fR use_sse (default is disable)
use Amazon's Server-Side Encryption or Server-Side Encryption with Customer-Provided Encryption Keys.
this option can not be specified with use_rrs. specifying only "use_sse" or "use_sse=1" enables Server-Side Encryption.(use_sse=1 for old version)
specifying this option with file path which has some SSE-C secret key enables Server-Side Encryption with Customer-Provided Encryption Keys.(use_sse=file)
the file must be 600 permission. the file can have some lines, each line is one SSE-C key. the first line in file is used as Customer-Provided Encryption Keys for uploading and change headers etc.
if there are some keys after first line, those are used downloading object which are encrypted by not first key.
so that, you can keep all SSE-C keys in file, that is SSE-C key history.
if AWSSSECKEYS environment is set, you can set SSE-C key instead of this option.
Specify one of three types of Amazon's Server-Side Encryption: SSE-S3, SSE-C or SSE-KMS. SSE-S3 uses Amazon S3-managed encryption keys, SSE-C uses customer-provided encryption keys, and SSE-KMS uses a master key which you manage in AWS KMS.
Specifying "use_sse" or "use_sse=1" enables the SSE-S3 type (use_sse=1 is the old-style parameter).
For SSE-C, specify "use_sse=custom", "use_sse=custom:<custom key file path>" or "use_sse=<custom key file path>" (specifying only <custom key file path> is the old-style parameter).
You can use "c" as an abbreviation for "custom".
The custom key file must have 600 permissions. The file can have several lines; each line is one SSE-C key.
The first line in the file is used as the customer-provided encryption key for uploading and changing headers etc.
If there are keys after the first line, they are used for downloading objects which were encrypted by a key other than the first one.
This way you can keep all SSE-C keys in one file, as an SSE-C key history.
If you specify "custom" ("c") without a file path, you need to set the custom key with the load_sse_c option or the AWSSSECKEYS environment variable (AWSSSECKEYS holds SSE-C keys separated by ":").
This option decides the SSE type.
So if you do not want to encrypt objects when uploading, but need to decrypt encrypted objects when downloading, you can use the load_sse_c option instead of this option.
For SSE-KMS, specify "use_sse=kmsid" or "use_sse=kmsid:<kms id>".
You can use "k" as an abbreviation for "kmsid".
If you have a <kms id> in AWS KMS, you can set it after "kmsid:" (or "k:").
If you specify only "kmsid" ("k"), you need to set the AWSSSEKMSID environment variable to your <kms id>.
Be careful: you cannot use a KMS id from a region different from your EC2 region.
.TP
\fB\-o\fR load_sse_c - specify SSE-C keys
Specify the path to the customer-provided encryption keys file used for decrypting when downloading.
If you use a customer-provided encryption key when uploading, specify it with "use_sse=custom".
The file can have many lines; each line is one custom key.
This way you can keep all SSE-C keys in one file, as an SSE-C key history.
The AWSSSECKEYS environment variable has the same content as this file.
.TP
\fB\-o\fR passwd_file (default="")
specify the path to the password file, which takes precedence over the password in $HOME/.passwd-s3fs and /etc/passwd-s3fs
@@ -83,16 +105,17 @@ specify the path to the password file, which takes precedence over the pas
This option specifies the configuration file path which file is the additional HTTP header by file(object) extension.
The configuration file format is below:
-----------
line = [file suffix] HTTP-header [HTTP-values]
file suffix = file(object) suffix, if this field is empty, it means "*"(all object).
line = [file suffix or regex] HTTP-header [HTTP-values]
file suffix = file(object) suffix, if this field is empty, it means "reg:(.*)".(=all object).
regex = regular expression to match the file(object) path. this type starts with "reg:" prefix.
HTTP-header = additional HTTP header name
HTTP-values = additional HTTP header value
-----------
Sample:
-----------
.gz Content-Encoding gzip
.Z Content-Encoding compress
X-S3FS-MYHTTPHEAD myvalue
.gz Content-Encoding gzip
.Z Content-Encoding compress
reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2
-----------
A sample configuration file is uploaded in the "test" directory.
If you use this option to set the "Content-Encoding" HTTP header, please take care regarding RFC 2616.
@@ -118,7 +141,8 @@ s3fs always has to check whether file(or sub directory) exists under object(path
This increases ListBucket requests and degrades performance.
You can specify this option to improve performance; s3fs remembers in its stat cache that the object (file or directory) does not exist.
.TP
\fB\-o\fR no_check_certificate (by default this option is disabled) - do not check ssl certificate.
\fB\-o\fR no_check_certificate (by default this option is disabled)
do not check ssl certificate.
server certificate won't be checked against the available certificate authorities.
.TP
\fB\-o\fR nodnscache - disable dns cache.
@@ -135,26 +159,23 @@ number of parallel requests for uploading big objects.
s3fs uploads large objects (default: over 20MB) by multipart POST request, and sends parallel requests.
This option limits the number of parallel requests that s3fs issues at once.
It is necessary to set this value depending on your CPU and network bandwidth.
This option is related to the fd_page_size option and affects it.
.TP
\fB\-o\fR fd_page_size(default="52428800"(50MB))
number of internal management page size for each file descriptor.
For delayed reading and writing, s3fs manages pages which are separated from the object. Each page has a status indicating whether its data is already loaded (or not loaded yet).
This option should not be changed unless you have performance trouble.
This value is changed automatically from the parallel_count and multipart_size values (fd_page_size = parallel_count * multipart_size).
.TP
\fB\-o\fR multipart_size(default="10"(10MB))
the size of one part in a multipart upload request.
The default size is 10MB (10485760 bytes); this value is the minimum size.
Specify a number of MB, 10 (MB) or over.
This option is related to the fd_page_size option and affects it.
The default size is 10MB (10485760 bytes); the minimum value is 5MB (5242880 bytes).
Specify a number of MB, 5 (MB) or over.
.TP
\fB\-o\fR ensure_diskfree(default the same as multipart_size value)
sets the MB of disk space to keep free. This option is the threshold of free disk space used by s3fs for cache files.
s3fs creates files for downloading, uploading, and caching.
If the free disk space is smaller than this value, s3fs avoids using disk space as much as possible, in exchange for performance.
.TP
\fB\-o\fR url (default="http://s3.amazonaws.com")
sets the url to use to access Amazon S3. If you want to use HTTPS, then you can set url=https://s3.amazonaws.com
.TP
\fB\-o\fR endpoint (default="us-east-1")
sets the endpoint to use.
If this option is not specified, s3fs uses \"us-east-1\" region as the default.
If this option is not specified, s3fs uses "us-east-1" region as the default.
If s3fs cannot connect to the region specified by this option, s3fs will not run.
But if you do not specify this option and cannot connect with the default region, s3fs retries automatically, connecting to another region.
s3fs can learn the correct region name, because it can find it in an error response from the S3 server.
@@ -176,8 +197,8 @@ Enable sending the "Content-MD5" header when uploading an object without multipart po
If this option is enabled, it has some influence on s3fs performance when uploading small objects.
Because s3fs always checks MD5 when uploading large objects, this option does not affect large objects.
.TP
\fB\-o\fR iam_role ( default is no role )
set the IAM Role that will supply the credentials from the instance meta-data.
\fB\-o\fR iam_role ( default is no IAM role )
This option takes an IAM role name or "auto". If you specify "auto", s3fs automatically uses the IAM role name that is set on the instance. Specifying this option without any argument is the same as specifying "auto".
.TP
\fB\-o\fR noxmlns - disable registering xml name space.
disable registering xml name space for the response of ListBucketResult and ListVersionsResult etc. The default name space is looked up from "http://s3.amazonaws.com/doc/2006-03-01".
@@ -194,6 +215,18 @@ If this option is specified with nocopapi, s3fs ignores it.
.TP
\fB\-o\fR use_path_request_style (use legacy API calling style)
Enable compatibility with S3-like APIs which do not support the virtual-host request style, by using the older path request style.
.TP
\fB\-o\fR noua (suppress User-Agent header)
Usually s3fs sends a User-Agent header in "s3fs/<version> (commit hash <hash>; <ssl library name>)" format.
If this option is specified, s3fs suppresses the User-Agent header.
.TP
\fB\-o\fR dbglevel (default="crit")
Set the debug message level. Set the value to crit (critical), err (error), warn (warning) or info (information); the default debug level is critical.
If s3fs is run with the "-d" option, the debug level is set to information.
When s3fs catches the SIGUSR2 signal, the debug level is bumped up.
.TP
\fB\-o\fR curldbg - put curl debug message
Outputs the debug messages from libcurl when this option is specified.
.SH FUSE/MOUNT OPTIONS
.TP
Most of the generic mount options described in 'man mount' are supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime, noatime, sync, async, dirsync). Filesystems are mounted with '\-onodev,nosuid' by default, which can only be overridden by a privileged user.

View File

@ -24,7 +24,7 @@ if USE_GNUTLS_NETTLE
AM_CPPFLAGS += -DUSE_GNUTLS_NETTLE
endif
s3fs_SOURCES = s3fs.cpp s3fs.h curl.cpp curl.h cache.cpp cache.h string_util.cpp string_util.h s3fs_util.cpp s3fs_util.h fdcache.cpp fdcache.h common_auth.cpp s3fs_auth.h common.h
s3fs_SOURCES = s3fs.cpp s3fs.h curl.cpp curl.h cache.cpp cache.h string_util.cpp string_util.h s3fs_util.cpp s3fs_util.h fdcache.cpp fdcache.h common_auth.cpp s3fs_auth.h addhead.cpp addhead.h common.h
if USE_SSL_OPENSSL
s3fs_SOURCES += openssl_auth.cpp
endif
@ -39,6 +39,6 @@ s3fs_LDADD = $(DEPS_LIBS)
noinst_PROGRAMS = test_string_util
test_string_util_SOURCES = string_util.cpp test_string_util.cpp
test_string_util_SOURCES = string_util.cpp test_string_util.cpp test_util.h
TESTS = test_string_util

src/addhead.cpp Normal file
View File

@ -0,0 +1,286 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <assert.h>
#include <curl/curl.h>
#include <sstream>
#include <fstream>
#include <string>
#include <map>
#include <list>
#include <vector>
#include "common.h"
#include "addhead.h"
#include "curl.h"
#include "s3fs.h"
using namespace std;
//-------------------------------------------------------------------
// Symbols
//-------------------------------------------------------------------
#define ADD_HEAD_REGEX "reg:"
//-------------------------------------------------------------------
// Class AdditionalHeader
//-------------------------------------------------------------------
AdditionalHeader AdditionalHeader::singleton;
//-------------------------------------------------------------------
// Class AdditionalHeader method
//-------------------------------------------------------------------
AdditionalHeader::AdditionalHeader()
{
if(this == AdditionalHeader::get()){
is_enable = false;
}else{
assert(false);
}
}
AdditionalHeader::~AdditionalHeader()
{
if(this == AdditionalHeader::get()){
Unload();
}else{
assert(false);
}
}
bool AdditionalHeader::Load(const char* file)
{
if(!file){
S3FS_PRN_WARN("file is NULL.");
return false;
}
Unload();
ifstream AH(file);
if(!AH.good()){
S3FS_PRN_WARN("Could not open file(%s).", file);
return false;
}
// read file
string line;
PADDHEAD paddhead;
while(getline(AH, line)){
if(0 == line.size()){
continue;
}
if('#' == line[0]){
continue;
}
// load a line
stringstream ss(line);
string key(""); // suffix(key)
string head; // additional HTTP header
string value; // header value
if(0 == isblank(line[0])){
ss >> key;
}
if(ss){
ss >> head;
if(ss && static_cast<size_t>(ss.tellg()) < line.size()){
value = line.substr(static_cast<int>(ss.tellg()) + 1);
}
}
// check it
if(0 == head.size()){
if(0 == key.size()){
continue;
}
S3FS_PRN_ERR("file format error: %s key(suffix) is no HTTP header value.", key.c_str());
Unload();
return false;
}
paddhead = new ADDHEAD;
if(0 == strncasecmp(key.c_str(), ADD_HEAD_REGEX, strlen(ADD_HEAD_REGEX))){
// regex
if(key.size() <= strlen(ADD_HEAD_REGEX)){
S3FS_PRN_ERR("file format error: %s key(suffix) does not have key string.", key.c_str());
delete paddhead;
continue;
}
key = key.substr(strlen(ADD_HEAD_REGEX));
// compile
regex_t* preg = new regex_t;
int result;
char errbuf[256];
if(0 != (result = regcomp(preg, key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info
regerror(result, preg, errbuf, sizeof(errbuf));
S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf);
delete preg;
delete paddhead;
continue;
}
// set
paddhead->pregex = preg;
paddhead->basestring = key;
paddhead->headkey = head;
paddhead->headvalue = value;
}else{
// not regex, directly comparing
paddhead->pregex = NULL;
paddhead->basestring = key;
paddhead->headkey = head;
paddhead->headvalue = value;
}
// add list
addheadlist.push_back(paddhead);
// set flag
if(!is_enable){
is_enable = true;
}
}
return true;
}
void AdditionalHeader::Unload(void)
{
is_enable = false;
for(addheadlist_t::iterator iter = addheadlist.begin(); iter != addheadlist.end(); iter = addheadlist.erase(iter)){
PADDHEAD paddhead = *iter;
if(paddhead){
if(paddhead->pregex){
regfree(paddhead->pregex);
delete paddhead->pregex;
}
delete paddhead;
}
}
}
bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const
{
if(!is_enable){
return true;
}
if(!path){
S3FS_PRN_WARN("path is NULL.");
return false;
}
size_t pathlength = strlen(path);
// loop
//
// [NOTE]
// Because duplicate keys are allowed, the entire list is scanned.
//
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
const PADDHEAD paddhead = *iter;
if(!paddhead){
continue;
}
if(paddhead->pregex){
// regex
regmatch_t match; // not use
if(0 == regexec(paddhead->pregex, path, 1, &match, 0)){
// match -> adding header
meta[paddhead->headkey] = paddhead->headvalue;
}
}else{
// directly comparing
if(paddhead->basestring.length() < pathlength){
if(0 == paddhead->basestring.length() || 0 == strcmp(&path[pathlength - paddhead->basestring.length()], paddhead->basestring.c_str())){
// match -> adding header
meta[paddhead->headkey] = paddhead->headvalue;
}
}
}
}
return true;
}
struct curl_slist* AdditionalHeader::AddHeader(struct curl_slist* list, const char* path) const
{
headers_t meta;
if(!AddHeader(meta, path)){
return list;
}
for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
// Adding header
list = curl_slist_sort_insert(list, iter->first.c_str(), iter->second.c_str());
}
meta.clear();
S3FS_MALLOCTRIM(0);
return list;
}
bool AdditionalHeader::Dump(void) const
{
if(!IS_S3FS_LOG_DBG()){
return true;
}
stringstream ssdbg;
int cnt = 1;
ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << endl;
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter, ++cnt){
const PADDHEAD paddhead = *iter;
ssdbg << " [" << cnt << "] = {" << endl;
if(paddhead){
if(paddhead->pregex){
ssdbg << " type\t\t--->\tregex" << endl;
}else{
ssdbg << " type\t\t--->\tsuffix matching" << endl;
}
ssdbg << " base string\t--->\t" << paddhead->basestring << endl;
ssdbg << " add header\t--->\t" << paddhead->headkey << ": " << paddhead->headvalue << endl;
}
ssdbg << " }" << endl;
}
ssdbg << "}" << endl;
// print all
S3FS_PRN_DBG("%s", ssdbg.str().c_str());
return true;
}
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
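The AdditionalHeader class above is driven by the ahbe_conf file parsed in Load(): each non-comment line is a suffix key, a header name and a header value, and a "reg:" prefix switches the key to a POSIX extended regex. A minimal usage sketch against this interface (the file path, config line and object path are illustrative examples, not s3fs defaults):

#include "common.h"   // headers_t
#include "addhead.h"

// An ahbe_conf line such as ".gz Content-Encoding gzip" adds that
// header to every object whose path ends in ".gz".
void load_and_apply_example()
{
    if(AdditionalHeader::get()->Load("/etc/ahbe.conf")){
        headers_t meta;
        AdditionalHeader::get()->AddHeader(meta, "/mybucket/logs/app.log.gz");
        AdditionalHeader::get()->Dump();   // dump the loaded table at debug level
    }
}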

src/addhead.h Normal file
View File

@ -0,0 +1,70 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_ADDHEAD_H_
#define S3FS_ADDHEAD_H_
#include <regex.h>
//----------------------------------------------
// class AdditionalHeader
//----------------------------------------------
typedef struct add_header{
regex_t* pregex; // not NULL means using regex, NULL means comparing suffix directly.
std::string basestring;
std::string headkey;
std::string headvalue;
}ADDHEAD, *PADDHEAD;
typedef std::vector<PADDHEAD> addheadlist_t;
class AdditionalHeader
{
private:
static AdditionalHeader singleton;
bool is_enable;
addheadlist_t addheadlist;
protected:
AdditionalHeader();
~AdditionalHeader();
public:
// Reference singleton
static AdditionalHeader* get(void) { return &singleton; }
bool Load(const char* file);
void Unload(void);
bool AddHeader(headers_t& meta, const char* path) const;
struct curl_slist* AddHeader(struct curl_slist* list, const char* path) const;
bool Dump(void) const;
};
#endif // S3FS_ADDHEAD_H_
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/

View File

@ -21,6 +21,9 @@
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#ifndef HAVE_CLOCK_GETTIME
#include <sys/time.h>
#endif
#include <unistd.h>
#include <stdint.h>
#include <pthread.h>
@ -29,6 +32,7 @@
#include <syslog.h>
#include <string>
#include <map>
#include <vector>
#include <algorithm>
#include <list>
@ -39,6 +43,91 @@
using namespace std;
//-------------------------------------------------------------------
// Utility
//-------------------------------------------------------------------
#ifndef CLOCK_REALTIME
#define CLOCK_REALTIME 0
#endif
#ifndef CLOCK_MONOTONIC
#define CLOCK_MONOTONIC CLOCK_REALTIME
#endif
#ifndef CLOCK_MONOTONIC_COARSE
#define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC
#endif
#ifndef HAVE_CLOCK_GETTIME
static int clock_gettime(int clk_id, struct timespec* ts)
{
struct timeval now;
if(0 != gettimeofday(&now, NULL)){
return -1;
}
ts->tv_sec = now.tv_sec;
ts->tv_nsec = now.tv_usec * 1000;
return 0;
}
#endif
inline void SetStatCacheTime(struct timespec& ts)
{
if(-1 == clock_gettime(CLOCK_MONOTONIC_COARSE, &ts)){
ts.tv_sec = time(NULL);
ts.tv_nsec = 0;
}
}
inline void InitStatCacheTime(struct timespec& ts)
{
ts.tv_sec = 0;
ts.tv_nsec = 0;
}
inline int CompareStatCacheTime(struct timespec& ts1, struct timespec& ts2)
{
// return -1: ts1 < ts2
// 0: ts1 == ts2
// 1: ts1 > ts2
if(ts1.tv_sec < ts2.tv_sec){
return -1;
}else if(ts1.tv_sec > ts2.tv_sec){
return 1;
}else{
if(ts1.tv_nsec < ts2.tv_nsec){
return -1;
}else if(ts1.tv_nsec > ts2.tv_nsec){
return 1;
}
}
return 0;
}
inline bool IsExpireStatCacheTime(const struct timespec& ts, const time_t& expire)
{
struct timespec nowts;
SetStatCacheTime(nowts);
return ((ts.tv_sec + expire) < nowts.tv_sec);
}
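Together these helpers give the stat cache a coarse monotonic timestamp with a portable gettimeofday() fallback. A small sketch of the intended call pattern inside cache.cpp (the 900-second expiry is an arbitrary example):

struct timespec ts;
InitStatCacheTime(ts);      // zero the timestamp
SetStatCacheTime(ts);       // stamp "now" from the coarse monotonic clock

time_t expire = 900;        // example: 15 minutes
if(IsExpireStatCacheTime(ts, expire)){
    // more than 900 seconds have passed since SetStatCacheTime()
}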
//
// For cache out
//
typedef std::vector<stat_cache_t::iterator> statiterlist_t;
struct sort_statiterlist{
// ascending order
bool operator()(const stat_cache_t::iterator& src1, const stat_cache_t::iterator& src2) const
{
int result = CompareStatCacheTime(src1->second->cache_date, src2->second->cache_date);
if(0 == result){
if(src1->second->hit_count < src2->second->hit_count){
result = -1;
}
}
return (result < 0);
}
};
//-------------------------------------------------------------------
// Static
//-------------------------------------------------------------------
@ -144,7 +233,7 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
if(iter != stat_cache.end() && (*iter).second){
stat_cache_entry* ent = (*iter).second;
if(!IsExpireTime|| (ent->cache_date + ExpireTime) >= time(NULL)){
if(!IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){
if(ent->noobjcache){
pthread_mutex_unlock(&StatCache::stat_cache_lock);
if(!IsCacheNoObject){
@ -156,19 +245,28 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
return false;
}
// hit without checking etag
string stretag;
if(petag){
string stretag = ent->meta["ETag"];
if('\0' != petag[0] && 0 != strcmp(petag, stretag.c_str())){
is_delete_cache = true;
// find & check ETag
for(headers_t::iterator iter = ent->meta.begin(); iter != ent->meta.end(); ++iter){
string tag = lower(iter->first);
if(tag == "etag"){
stretag = iter->second;
if('\0' != petag[0] && 0 != strcmp(petag, stretag.c_str())){
is_delete_cache = true;
}
break;
}
}
}
if(is_delete_cache){
// not hit by different ETag
DPRNNN("stat cache not hit by ETag[path=%s][time=%jd][hit count=%lu][ETag(%s)!=(%s)]",
strpath.c_str(), (intmax_t)(ent->cache_date), ent->hit_count, petag ? petag : "null", ent->meta["ETag"].c_str());
S3FS_PRN_DBG("stat cache not hit by ETag[path=%s][time=%jd.%09ld][hit count=%lu][ETag(%s)!=(%s)]",
strpath.c_str(), (intmax_t)(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count, petag ? petag : "null", stretag.c_str());
}else{
// hit
DPRNNN("stat cache hit [path=%s][time=%jd][hit count=%lu]", strpath.c_str(), (intmax_t)(ent->cache_date), ent->hit_count);
S3FS_PRN_DBG("stat cache hit [path=%s][time=%jd.%09ld][hit count=%lu]",
strpath.c_str(), (intmax_t)(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count);
if(pst!= NULL){
*pst= ent->stbuf;
@ -180,7 +278,7 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
(*pisforce) = ent->isforce;
}
ent->hit_count++;
ent->cache_date = time(NULL);
SetStatCacheTime(ent->cache_date);
pthread_mutex_unlock(&StatCache::stat_cache_lock);
return true;
}
@ -220,10 +318,10 @@ bool StatCache::IsNoObjectCache(string& key, bool overcheck)
}
if(iter != stat_cache.end() && (*iter).second) {
if(!IsExpireTime|| ((*iter).second->cache_date + ExpireTime) >= time(NULL)){
if(!IsExpireTime || !IsExpireStatCacheTime((*iter).second->cache_date, ExpireTime)){
if((*iter).second->noobjcache){
// noobjcache = true means no object.
(*iter).second->cache_date = time(NULL);
SetStatCacheTime((*iter).second->cache_date);
pthread_mutex_unlock(&StatCache::stat_cache_lock);
return true;
}
@ -240,17 +338,24 @@ bool StatCache::IsNoObjectCache(string& key, bool overcheck)
return false;
}
bool StatCache::AddStat(std::string& key, headers_t& meta, bool forcedir)
bool StatCache::AddStat(std::string& key, headers_t& meta, bool forcedir, bool no_truncate)
{
if(CacheSize< 1){
if(!no_truncate && CacheSize< 1){
return true;
}
DPRNNN("add stat cache entry[path=%s]", key.c_str());
S3FS_PRN_INFO3("add stat cache entry[path=%s]", key.c_str());
if(stat_cache.end() != stat_cache.find(key)){
pthread_mutex_lock(&StatCache::stat_cache_lock);
bool found = stat_cache.end() != stat_cache.find(key);
bool do_truncate = stat_cache.size() > CacheSize;
pthread_mutex_unlock(&StatCache::stat_cache_lock);
if(found){
DelStat(key.c_str());
}else{
if(stat_cache.size() > CacheSize){
if(do_truncate){
if(!TruncateCache()){
return false;
}
@ -264,10 +369,11 @@ bool StatCache::AddStat(std::string& key, headers_t& meta, bool forcedir)
return false;
}
ent->hit_count = 0;
ent->cache_date = time(NULL); // Set time.
ent->isforce = forcedir;
ent->noobjcache = false;
ent->notruncate = (no_truncate ? 1L : 0L);
ent->meta.clear();
SetStatCacheTime(ent->cache_date); // Set time.
//copy only some keys
for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
string tag = lower(iter->first);
@ -284,9 +390,19 @@ bool StatCache::AddStat(std::string& key, headers_t& meta, bool forcedir)
ent->meta[tag] = value; // key is lower case for "x-amz"
}
}
// add
pthread_mutex_lock(&StatCache::stat_cache_lock);
stat_cache_t::iterator iter = stat_cache.find(key); // recheck for same key exists
if(stat_cache.end() != iter){
if(iter->second){
delete iter->second;
}
stat_cache.erase(iter);
}
stat_cache[key] = ent;
pthread_mutex_unlock(&StatCache::stat_cache_lock);
return true;
@ -300,12 +416,19 @@ bool StatCache::AddNoObjectCache(string& key)
if(CacheSize < 1){
return true;
}
DPRNNN("add no object cache entry[path=%s]", key.c_str());
S3FS_PRN_INFO3("add no object cache entry[path=%s]", key.c_str());
if(stat_cache.end() != stat_cache.find(key)){
pthread_mutex_lock(&StatCache::stat_cache_lock);
bool found = stat_cache.end() != stat_cache.find(key);
bool do_truncate = stat_cache.size() > CacheSize;
pthread_mutex_unlock(&StatCache::stat_cache_lock);
if(found){
DelStat(key.c_str());
}else{
if(stat_cache.size() > CacheSize){
if(do_truncate){
if(!TruncateCache()){
return false;
}
@ -316,47 +439,103 @@ bool StatCache::AddNoObjectCache(string& key)
stat_cache_entry* ent = new stat_cache_entry();
memset(&(ent->stbuf), 0, sizeof(struct stat));
ent->hit_count = 0;
ent->cache_date = time(NULL); // Set time.
ent->isforce = false;
ent->noobjcache = true;
ent->notruncate = 0L;
ent->meta.clear();
SetStatCacheTime(ent->cache_date); // Set time.
// add
pthread_mutex_lock(&StatCache::stat_cache_lock);
stat_cache_t::iterator iter = stat_cache.find(key); // recheck for same key exists
if(stat_cache.end() != iter){
if(iter->second){
delete iter->second;
}
stat_cache.erase(iter);
}
stat_cache[key] = ent;
pthread_mutex_unlock(&StatCache::stat_cache_lock);
return true;
}
void StatCache::ChangeNoTruncateFlag(std::string key, bool no_truncate)
{
pthread_mutex_lock(&StatCache::stat_cache_lock);
stat_cache_t::iterator iter = stat_cache.find(key);
if(stat_cache.end() != iter){
stat_cache_entry* ent = iter->second;
if(ent){
if(no_truncate){
++(ent->notruncate);
}else{
if(0L < ent->notruncate){
--(ent->notruncate);
}
}
}
}
pthread_mutex_unlock(&StatCache::stat_cache_lock);
}
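ChangeNoTruncateFlag() treats notruncate as a reference count, so an entry can be pinned by several openers at once and only becomes evictable again when the last one unpins it. An illustrative sketch, assuming the cache.h declarations (the path is an example):

#include <string>
#include "cache.h"

void pin_example()
{
    std::string path("/mybucket/file.txt");          // example path
    StatCache* pcache = StatCache::getStatCacheData();

    pcache->ChangeNoTruncateFlag(path, true);        // pin: ++notruncate
    // ... the entry survives TruncateCache() while pinned ...
    pcache->ChangeNoTruncateFlag(path, false);       // unpin: --notruncate
}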
bool StatCache::TruncateCache(void)
{
if(0 == stat_cache.size()){
if(stat_cache.empty()){
return true;
}
pthread_mutex_lock(&StatCache::stat_cache_lock);
time_t lowest_time = time(NULL) + 1;
stat_cache_t::iterator iter_to_delete = stat_cache.end();
stat_cache_t::iterator iter;
for(iter = stat_cache.begin(); iter != stat_cache.end(); iter++) {
if((*iter).second){
if(lowest_time > (*iter).second->cache_date){
lowest_time = (*iter).second->cache_date;
iter_to_delete = iter;
// 1) erase over expire time
if(IsExpireTime){
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ){
stat_cache_entry* entry = iter->second;
if(!entry || (0L == entry->notruncate && IsExpireStatCacheTime(entry->cache_date, ExpireTime))){
delete entry;
stat_cache.erase(iter++);
}else{
++iter;
}
}
}
if(stat_cache.end() != iter_to_delete){
DPRNNN("truncate stat cache[path=%s]", (*iter_to_delete).first.c_str());
if((*iter_to_delete).second){
delete (*iter_to_delete).second;
}
stat_cache.erase(iter_to_delete);
S3FS_MALLOCTRIM(0);
// 2) check stat cache count
if(stat_cache.size() < CacheSize){
pthread_mutex_unlock(&StatCache::stat_cache_lock);
return true;
}
// 3) erase from the old cache in order
size_t erase_count= stat_cache.size() - CacheSize + 1;
statiterlist_t erase_iters;
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ++iter){
// check no truncate
stat_cache_entry* ent = iter->second;
if(ent && 0L < ent->notruncate){
// skip for no truncate entry
if(0 < erase_count){
--erase_count; // decrement
}
}else{
// iter does not have the notruncate flag
erase_iters.push_back(iter);
sort(erase_iters.begin(), erase_iters.end(), sort_statiterlist());
if(erase_count < erase_iters.size()){
erase_iters.pop_back();
}
}
}
for(statiterlist_t::iterator iiter = erase_iters.begin(); iiter != erase_iters.end(); ++iiter){
stat_cache_t::iterator siter = *iiter;
S3FS_PRN_DBG("truncate stat cache[path=%s]", siter->first.c_str());
delete siter->second;
stat_cache.erase(siter);
}
S3FS_MALLOCTRIM(0);
pthread_mutex_unlock(&StatCache::stat_cache_lock);
return true;
@ -367,7 +546,7 @@ bool StatCache::DelStat(const char* key)
if(!key){
return false;
}
DPRNNN("delete stat cache entry[path=%s]", key);
S3FS_PRN_INFO3("delete stat cache entry[path=%s]", key);
pthread_mutex_lock(&StatCache::stat_cache_lock);

View File

@ -17,6 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_CACHE_H_
#define S3FS_CACHE_H_
@ -26,15 +27,18 @@
// Struct
//
struct stat_cache_entry {
struct stat stbuf;
unsigned long hit_count;
time_t cache_date;
headers_t meta;
bool isforce;
bool noobjcache; // Flag: cache is no object for no listing.
struct stat stbuf;
unsigned long hit_count;
struct timespec cache_date;
headers_t meta;
bool isforce;
bool noobjcache; // Flag: cache is no object for no listing.
unsigned long notruncate; // 0<: not remove automatically at checking truncate
stat_cache_entry() : hit_count(0), cache_date(0), isforce(false), noobjcache(false) {
stat_cache_entry() : hit_count(0), isforce(false), noobjcache(false), notruncate(0L) {
memset(&stbuf, 0, sizeof(struct stat));
cache_date.tv_sec = 0;
cache_date.tv_nsec = 0;
meta.clear();
}
};
@ -56,15 +60,15 @@ class StatCache
bool IsCacheNoObject;
private:
StatCache();
~StatCache();
void Clear(void);
bool GetStat(std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce);
// Truncate stat cache
bool TruncateCache(void);
public:
StatCache();
~StatCache();
// Reference singleton
static StatCache* getStatCacheData(void) {
return &singleton;
@ -109,7 +113,10 @@ class StatCache
bool AddNoObjectCache(std::string& key);
// Add stat cache
bool AddStat(std::string& key, headers_t& meta, bool forcedir = false);
bool AddStat(std::string& key, headers_t& meta, bool forcedir = false, bool no_truncate = false);
// Change no truncate flag
void ChangeNoTruncateFlag(std::string key, bool no_truncate);
// Delete stat cache
bool DelStat(const char* key);

View File

@ -17,64 +17,92 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_COMMON_H_
#define S3FS_COMMON_H_
#include "../config.h"
//
// Macro
//
#define SAFESTRPTR(strptr) (strptr ? strptr : "")
// for debug
#define FPRINT_NEST_SPACE_0 ""
#define FPRINT_NEST_SPACE_1 " "
#define FPRINT_NEST_SPACE_2 " "
#define FPRINT_NEST_CHECK(NEST) \
(0 == NEST ? FPRINT_NEST_SPACE_0 : 1 == NEST ? FPRINT_NEST_SPACE_1 : FPRINT_NEST_SPACE_2)
//
// Debug level
//
enum s3fs_log_level{
S3FS_LOG_CRIT = 0, // LOG_CRIT
S3FS_LOG_ERR = 1, // LOG_ERR
S3FS_LOG_WARN = 3, // LOG_WARNING
S3FS_LOG_INFO = 7, // LOG_INFO
S3FS_LOG_DBG = 15 // LOG_DEBUG
};
#define LOWFPRINT(NEST, ...) \
printf("%s%s(%d): ", FPRINT_NEST_CHECK(NEST), __func__, __LINE__); \
printf(__VA_ARGS__); \
printf("\n"); \
//
// Debug macros
//
#define IS_S3FS_LOG_CRIT() (S3FS_LOG_CRIT == debug_level)
#define IS_S3FS_LOG_ERR() (S3FS_LOG_ERR == (debug_level & S3FS_LOG_DBG))
#define IS_S3FS_LOG_WARN() (S3FS_LOG_WARN == (debug_level & S3FS_LOG_DBG))
#define IS_S3FS_LOG_INFO() (S3FS_LOG_INFO == (debug_level & S3FS_LOG_DBG))
#define IS_S3FS_LOG_DBG() (S3FS_LOG_DBG == (debug_level & S3FS_LOG_DBG))
#define FPRINT(NEST, ...) \
if(foreground){ \
LOWFPRINT(NEST, __VA_ARGS__); \
}
#define S3FS_LOG_LEVEL_TO_SYSLOG(level) \
( S3FS_LOG_DBG == (level & S3FS_LOG_DBG) ? LOG_DEBUG : \
S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? LOG_INFO : \
S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? LOG_WARNING : \
S3FS_LOG_ERR == (level & S3FS_LOG_DBG) ? LOG_ERR : LOG_CRIT )
#define FPRINT2(NEST, ...) \
if(foreground2){ \
LOWFPRINT(NEST, __VA_ARGS__); \
}
#define S3FS_LOG_LEVEL_STRING(level) \
( S3FS_LOG_DBG == (level & S3FS_LOG_DBG) ? "[DBG] " : \
S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? "[INF] " : \
S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? "[WAN] " : \
S3FS_LOG_ERR == (level & S3FS_LOG_DBG) ? "[ERR] " : "[CRT] " )
#define LOWSYSLOGPRINT(LEVEL, ...) \
syslog(LEVEL, __VA_ARGS__);
#define S3FS_LOG_NEST_MAX 4
#define S3FS_LOG_NEST(nest) (nest < S3FS_LOG_NEST_MAX ? s3fs_log_nest[nest] : s3fs_log_nest[S3FS_LOG_NEST_MAX - 1])
#define SYSLOGPRINT(LEVEL, ...) \
if(LEVEL <= LOG_CRIT || debug){ \
LOWSYSLOGPRINT(LEVEL, __VA_ARGS__); \
}
#define S3FS_LOW_LOGPRN(level, fmt, ...) \
if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \
if(foreground){ \
fprintf(stdout, "%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), __FILE__, __func__, __LINE__, __VA_ARGS__); \
}else{ \
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s:%s(%d): " fmt "%s", __FILE__, __func__, __LINE__, __VA_ARGS__); \
} \
}
#define DPRINT(LEVEL, NEST, ...) \
FPRINT(NEST, __VA_ARGS__); \
SYSLOGPRINT(LEVEL, __VA_ARGS__);
#define S3FS_LOW_LOGPRN2(level, nest, fmt, ...) \
if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \
if(foreground){ \
fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), S3FS_LOG_NEST(nest), __FILE__, __func__, __LINE__, __VA_ARGS__); \
}else{ \
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s" fmt "%s", S3FS_LOG_NEST(nest), __VA_ARGS__); \
} \
}
#define DPRINT2(LEVEL, ...) \
FPRINT2(2, __VA_ARGS__); \
SYSLOGPRINT(LEVEL, __VA_ARGS__);
#define S3FS_LOW_LOGPRN_EXIT(fmt, ...) \
if(foreground){ \
fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \
}else{ \
fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "s3fs: " fmt "%s", __VA_ARGS__); \
}
// print debug message
#define FPRN(...) FPRINT(0, __VA_ARGS__)
#define FPRNN(...) FPRINT(1, __VA_ARGS__)
#define FPRNNN(...) FPRINT(2, __VA_ARGS__)
#define FPRNINFO(...) FPRINT2(2, __VA_ARGS__)
// print debug message with putting syslog
#define DPRNCRIT(...) DPRINT(LOG_CRIT, 0, __VA_ARGS__)
#define DPRN(...) DPRINT(LOG_ERR, 0, __VA_ARGS__)
#define DPRNN(...) DPRINT(LOG_DEBUG, 1, __VA_ARGS__)
#define DPRNNN(...) DPRINT(LOG_DEBUG, 2, __VA_ARGS__)
#define DPRNINFO(...) DPRINT2(LOG_INFO, __VA_ARGS__)
// [NOTE]
// small trick for VA_ARGS
//
#define S3FS_PRN_EXIT(fmt, ...) S3FS_LOW_LOGPRN_EXIT(fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CRIT(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_CRIT, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_ERR(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_ERR, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_WARN(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_WARN, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_DBG(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_DBG, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 0, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO0(fmt, ...) S3FS_LOG_INFO(fmt, __VA_ARGS__)
#define S3FS_PRN_INFO1(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 1, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO2(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 2, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO3(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 3, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CURL(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_CRIT, 0, fmt, ##__VA_ARGS__, "")
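Call sites pick a macro by severity; the macro compares its level against the global debug_level, then writes to stdout in foreground mode or to syslog otherwise. For example (messages and names are illustrative; only S3FS_PRN_INFO3's text appears in this diff):

#include <string>
#include "common.h"

static void log_examples(const std::string& key)
{
    S3FS_PRN_ERR("could not open file(%s)", key.c_str());            // error level
    S3FS_PRN_WARN("retrying request for %s", key.c_str());           // warning level
    S3FS_PRN_INFO3("add stat cache entry[path=%s]", key.c_str());    // info, nest level 3
    S3FS_PRN_DBG("debug only message for %s", key.c_str());          // debug level
}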
//
// Typedef
@ -90,7 +118,7 @@ typedef struct xattr_value{
unsigned char* pvalue;
size_t length;
xattr_value(unsigned char* pval = NULL, size_t len = 0) : pvalue(pval), length(len) {}
explicit xattr_value(unsigned char* pval = NULL, size_t len = 0) : pvalue(pval), length(len) {}
~xattr_value()
{
if(pvalue){
@ -104,17 +132,17 @@ typedef std::map<std::string, PXATTRVAL> xattrs_t;
//
// Global valiables
//
extern bool debug;
extern bool foreground;
extern bool foreground2;
extern bool nomultipart;
extern bool pathrequeststyle;
extern std::string program_name;
extern std::string service_path;
extern std::string host;
extern std::string bucket;
extern std::string mount_prefix;
extern std::string endpoint;
extern bool foreground;
extern bool nomultipart;
extern bool pathrequeststyle;
extern std::string program_name;
extern std::string service_path;
extern std::string host;
extern std::string bucket;
extern std::string mount_prefix;
extern std::string endpoint;
extern s3fs_log_level debug_level;
extern const char* s3fs_log_nest[S3FS_LOG_NEST_MAX];
#endif // S3FS_COMMON_H_

View File

@ -18,104 +18,20 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include "s3fs_auth.h"
#include "string_util.h"
using namespace std;
//-------------------------------------------------------------------
// Utility Function
//-------------------------------------------------------------------
char* s3fs_base64(const unsigned char* input, size_t length)
{
static const char* base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
char* result;
if(!input || 0 >= length){
return NULL;
}
if(NULL == (result = (char*)malloc((((length / 3) + 1) * 4 + 1) * sizeof(char)))){
return NULL; // ENOMEM
}
unsigned char parts[4];
size_t rpos;
size_t wpos;
for(rpos = 0, wpos = 0; rpos < length; rpos += 3){
parts[0] = (input[rpos] & 0xfc) >> 2;
parts[1] = ((input[rpos] & 0x03) << 4) | ((((rpos + 1) < length ? input[rpos + 1] : 0x00) & 0xf0) >> 4);
parts[2] = (rpos + 1) < length ? (((input[rpos + 1] & 0x0f) << 2) | ((((rpos + 2) < length ? input[rpos + 2] : 0x00) & 0xc0) >> 6)) : 0x40;
parts[3] = (rpos + 2) < length ? (input[rpos + 2] & 0x3f) : 0x40;
result[wpos++] = base[parts[0]];
result[wpos++] = base[parts[1]];
result[wpos++] = base[parts[2]];
result[wpos++] = base[parts[3]];
}
result[wpos] = '\0';
return result;
}
inline unsigned char char_decode64(const char ch)
{
unsigned char by;
if('A' <= ch && ch <= 'Z'){ // A - Z
by = static_cast<unsigned char>(ch - 'A');
}else if('a' <= ch && ch <= 'z'){ // a - z
by = static_cast<unsigned char>(ch - 'a' + 26);
}else if('0' <= ch && ch <= '9'){ // 0 - 9
by = static_cast<unsigned char>(ch - '0' + 52);
}else if('+' == ch){ // +
by = 62;
}else if('/' == ch){ // /
by = 63;
}else if('=' == ch){ // =
by = 64;
}else{ // something wrong
by = 64;
}
return by;
}
unsigned char* s3fs_decode64(const char* input, size_t* plength)
{
unsigned char* result;
if(!input || 0 == strlen(input) || !plength){
return NULL;
}
if(NULL == (result = (unsigned char*)malloc((strlen(input) + 1)))){
return NULL; // ENOMEM
}
unsigned char parts[4];
size_t input_len = strlen(input);
size_t rpos;
size_t wpos;
for(rpos = 0, wpos = 0; rpos < input_len; rpos += 4){
parts[0] = char_decode64(input[rpos]);
parts[1] = (rpos + 1) < input_len ? char_decode64(input[rpos + 1]) : 64;
parts[2] = (rpos + 2) < input_len ? char_decode64(input[rpos + 2]) : 64;
parts[3] = (rpos + 3) < input_len ? char_decode64(input[rpos + 3]) : 64;
result[wpos++] = ((parts[0] << 2) & 0xfc) | ((parts[1] >> 4) & 0x03);
if(64 == parts[2]){
break;
}
result[wpos++] = ((parts[1] << 4) & 0xf0) | ((parts[2] >> 2) & 0x0f);
if(64 == parts[3]){
break;
}
result[wpos++] = ((parts[2] << 6) & 0xc0) | (parts[3] & 0x3f);
}
*plength = wpos;
return result;
}
string s3fs_get_content_md5(int fd)
{
unsigned char* md5hex;
@ -139,22 +55,16 @@ string s3fs_get_content_md5(int fd)
string s3fs_md5sum(int fd, off_t start, ssize_t size)
{
size_t digestlen = get_md5_digest_length();
char md5[2 * digestlen + 1];
char hexbuf[3];
unsigned char* md5hex;
if(NULL == (md5hex = s3fs_md5hexsum(fd, start, size))){
return string("");
}
memset(md5, 0, 2 * digestlen + 1);
for(size_t pos = 0; pos < digestlen; pos++){
snprintf(hexbuf, 3, "%02x", md5hex[pos]);
strncat(md5, hexbuf, 2);
}
std::string md5 = s3fs_hex(md5hex, digestlen);
free(md5hex);
return string(md5);
return md5;
}
string s3fs_sha256sum(int fd, off_t start, ssize_t size)

File diff suppressed because it is too large

View File

@ -17,9 +17,17 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_CURL_H_
#define S3FS_CURL_H_
#include <cassert>
//----------------------------------------------
// Symbols
//----------------------------------------------
#define MIN_MULTIPART_SIZE 5242880 // 5MB
//----------------------------------------------
// class BodyData
//----------------------------------------------
@ -115,6 +123,35 @@ typedef std::map<CURL*, progress_t> curlprogress_t;
class S3fsMultiCurl;
//----------------------------------------------
// class CurlHandlerPool
//----------------------------------------------
class CurlHandlerPool
{
public:
CurlHandlerPool(int maxHandlers)
: mMaxHandlers(maxHandlers)
, mHandlers(NULL)
, mIndex(-1)
{
assert(maxHandlers > 0);
}
bool Init();
bool Destroy();
CURL* GetHandler();
void ReturnHandler(CURL* h);
private:
int mMaxHandlers;
pthread_mutex_t mLock;
CURL** mHandlers;
int mIndex;
};
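The pool keeps up to mMaxHandlers CURL easy handles alive so connections (and their TLS sessions) can be reused across requests instead of being torn down each time. A usage sketch against the interface above (the pool size is an example; error handling elided):

CurlHandlerPool pool(32);        // example capacity
pool.Init();                     // create the lock and handle storage

CURL* hCurl = pool.GetHandler(); // reuse a pooled handle, or get a fresh one
// ... curl_easy_setopt()/curl_easy_perform() as usual ...
pool.ReturnHandler(hCurl);       // hand the handle back for reuse

pool.Destroy();                  // clean up all pooled handles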
//----------------------------------------------
// class S3fsCurl
//----------------------------------------------
@ -122,6 +159,21 @@ typedef std::map<std::string, std::string> iamcredmap_t;
typedef std::map<std::string, std::string> sseckeymap_t;
typedef std::list<sseckeymap_t> sseckeylist_t;
// strage class(rrs)
enum storage_class_t {
STANDARD,
STANDARD_IA,
REDUCED_REDUNDANCY
};
// sse type
enum sse_type_t {
SSE_DISABLE = 0, // not use server side encrypting
SSE_S3, // server side encrypting by S3 key
SSE_C, // server side encrypting by custom key
SSE_KMS // server side encrypting by kms id
};
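The sse_type_t values correspond to the three S3 server-side encryption modes. A hypothetical helper, not part of s3fs, showing the x-amz-server-side-encryption header value each mode would imply:

// hypothetical helper; SSE-C does not use this header at all, it sends
// the separate x-amz-server-side-encryption-customer-* headers instead
static const char* sse_header_value(sse_type_t type)
{
    switch(type){
        case SSE_S3:  return "AES256";    // S3-managed keys
        case SSE_KMS: return "aws:kms";   // KMS-managed keys
        case SSE_C:                       // customer-supplied key headers
        case SSE_DISABLE:
        default:      return NULL;        // no x-amz-server-side-encryption header
    }
}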
// share
#define SHARE_MUTEX_DNS 0
#define SHARE_MUTEX_SSL_SESSION 1
@ -149,13 +201,16 @@ class S3fsCurl
REQTYPE_COPYMULTIPOST,
REQTYPE_MULTILIST,
REQTYPE_IAMCRED,
REQTYPE_ABORTMULTIUPLOAD
REQTYPE_ABORTMULTIUPLOAD,
REQTYPE_IAMROLE
};
// class variables
static pthread_mutex_t curl_handles_lock;
static pthread_mutex_t curl_share_lock[SHARE_MUTEX_MAX];
static bool is_initglobal_done;
static CurlHandlerPool* sCurlPool;
static int sCurlPoolSize;
static CURLSH* hCurlShare;
static bool is_cert_check;
static bool is_dns_cache;
@ -165,9 +220,10 @@ class S3fsCurl
static int retries;
static bool is_public_bucket;
static std::string default_acl; // TODO: to enum
static bool is_use_rrs;
static storage_class_t storage_class;
static sseckeylist_t sseckeys;
static bool is_use_sse;
static std::string ssekmsid;
static sse_type_t ssetype;
static bool is_content_md5;
static bool is_verbose;
static std::string AWSAccessKeyId;
@ -183,6 +239,7 @@ class S3fsCurl
static int max_parallel_cnt;
static off_t multipart_size;
static bool is_sigv4;
static bool is_ua; // User-Agent
// variables
CURL* hCurl;
@ -206,12 +263,13 @@ class S3fsCurl
int b_postdata_remaining; // backup for retrying
off_t b_partdata_startpos; // backup for retrying
ssize_t b_partdata_size; // backup for retrying
bool b_ssekey_pos; // backup for retrying
std::string b_ssekey_md5; // backup for retrying
int b_ssekey_pos; // backup for retrying
std::string b_ssevalue; // backup for retrying
sse_type_t b_ssetype; // backup for retrying
public:
// constructor/destructor
S3fsCurl(bool ahbe = false);
explicit S3fsCurl(bool ahbe = false);
~S3fsCurl();
private:
@ -240,22 +298,26 @@ class S3fsCurl
static bool ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval);
static bool SetIAMCredentials(const char* response);
static bool ParseIAMRoleFromMetaDataResponse(const char* response, std::string& rolename);
static bool SetIAMRoleFromMetaData(const char* response);
static bool LoadEnvSseCKeys(void);
static bool LoadEnvSseKmsid(void);
static bool PushbackSseKeys(std::string& onekey);
static bool AddUserAgent(CURL* hCurl);
static int CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
// methods
bool ResetHandle(void);
bool RemakeHandle(void);
bool ClearInternalData(void);
void insertV4Headers(const std::string &op, const std::string &path, const std::string &query_string, const std::string &payload_hash);
std::string CalcSignatureV2(std::string method, std::string strMD5, std::string content_type, std::string date, std::string resource);
std::string CalcSignature(std::string method, std::string canonical_uri, std::string query_string, std::string strdate, std::string payload_hash, std::string date8601);
std::string CalcSignatureV2(const std::string& method, const std::string& strMD5, const std::string& content_type, const std::string& date, const std::string& resource);
std::string CalcSignature(const std::string& method, const std::string& canonical_uri, const std::string& query_string, const std::string& strdate, const std::string& payload_hash, const std::string& date8601);
bool GetUploadId(std::string& upload_id);
int GetIAMCredentials(void);
int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy);
int CompleteMultipartPostRequest(const char* tpath, std::string& upload_id, etaglist_t& parts);
int UploadMultipartPostSetup(const char* tpath, int part_num, std::string& upload_id);
int UploadMultipartPostRequest(const char* tpath, int part_num, std::string& upload_id);
int UploadMultipartPostSetup(const char* tpath, int part_num, const std::string& upload_id);
int CopyMultipartPostRequest(const char* from, const char* to, int part_num, std::string& upload_id, headers_t& meta);
public:
@ -267,7 +329,7 @@ class S3fsCurl
static bool CheckIAMCredentialUpdate(void);
// class methods(valiables)
static std::string LookupMimeType(std::string name);
static std::string LookupMimeType(const std::string& name);
static bool SetCheckCertificate(bool isCertCheck);
static bool SetDnsCache(bool isCache);
static bool SetSslSessionCache(bool isCache);
@ -278,16 +340,23 @@ class S3fsCurl
static bool SetPublicBucket(bool flag);
static bool IsPublicBucket(void) { return S3fsCurl::is_public_bucket; }
static std::string SetDefaultAcl(const char* acl);
static bool SetUseRrs(bool flag);
static bool GetUseRrs(void) { return S3fsCurl::is_use_rrs; }
static bool SetSseKeys(const char* filepath);
static bool LoadEnvSseKeys(void);
static storage_class_t SetStorageClass(storage_class_t storage_class);
static storage_class_t GetStorageClass() { return S3fsCurl::storage_class; }
static bool LoadEnvSse(void) { return (S3fsCurl::LoadEnvSseCKeys() && S3fsCurl::LoadEnvSseKmsid()); }
static sse_type_t SetSseType(sse_type_t type);
static sse_type_t GetSseType(void) { return S3fsCurl::ssetype; }
static bool IsSseDisable(void) { return (SSE_DISABLE == S3fsCurl::ssetype); }
static bool IsSseS3Type(void) { return (SSE_S3 == S3fsCurl::ssetype); }
static bool IsSseCType(void) { return (SSE_C == S3fsCurl::ssetype); }
static bool IsSseKmsType(void) { return (SSE_KMS == S3fsCurl::ssetype); }
static bool FinalCheckSse(void);
static bool SetSseCKeys(const char* filepath);
static bool SetSseKmsid(const char* kmsid);
static bool IsSetSseKmsId(void) { return !S3fsCurl::ssekmsid.empty(); }
static const char* GetSseKmsId(void) { return S3fsCurl::ssekmsid.c_str(); }
static bool GetSseKey(std::string& md5, std::string& ssekey);
static bool GetSseKeyMd5(int pos, std::string& md5);
static int GetSseKeyCount(void);
static bool IsSseCustomMode(void);
static bool SetUseSse(bool flag);
static bool GetUseSse(void) { return S3fsCurl::is_use_sse; }
static bool SetContentMd5(bool flag);
static bool SetVerbose(bool flag);
static bool GetVerbose(void) { return S3fsCurl::is_verbose; }
@ -305,12 +374,15 @@ class S3fsCurl
static off_t GetMultipartSize(void) { return S3fsCurl::multipart_size; }
static bool SetSignatureV4(bool isset) { bool bresult = S3fsCurl::is_sigv4; S3fsCurl::is_sigv4 = isset; return bresult; }
static bool IsSignatureV4(void) { return S3fsCurl::is_sigv4; }
static bool SetUserAgentFlag(bool isset) { bool bresult = S3fsCurl::is_ua; S3fsCurl::is_ua = isset; return bresult; }
static bool IsUserAgentFlag(void) { return S3fsCurl::is_ua; }
// methods
bool CreateCurlHandle(bool force = false);
bool DestroyCurlHandle(void);
bool AddSseKeyRequestHead(std::string& md5, bool is_copy);
bool LoadIAMRoleFromMetaData(void);
bool AddSseRequestHead(sse_type_t ssetype, std::string& ssevalue, bool is_only_c, bool is_copy);
bool GetResponseCode(long& responseCode);
int RequestPerform(void);
int DeleteRequest(const char* tpath);
@ -321,14 +393,18 @@ class S3fsCurl
int HeadRequest(const char* tpath, headers_t& meta);
int PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy);
int PutRequest(const char* tpath, headers_t& meta, int fd);
int PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size, std::string& ssekeymd5);
int PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size, sse_type_t ssetype, std::string& ssevalue);
int GetObjectRequest(const char* tpath, int fd, off_t start = -1, ssize_t size = -1);
int CheckBucket(void);
int ListBucketRequest(const char* tpath, const char* query);
int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy);
int CompleteMultipartPostRequest(const char* tpath, std::string& upload_id, etaglist_t& parts);
int UploadMultipartPostRequest(const char* tpath, int part_num, const std::string& upload_id);
int MultipartListRequest(std::string& body);
int AbortMultipartUpload(const char* tpath, std::string& upload_id);
int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy);
int MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool is_copy);
int MultipartUploadRequest(const std::string& upload_id, const char* tpath, int fd, off_t offset, size_t size, etaglist_t& list);
int MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size);
// methods(valiables)
@ -391,36 +467,6 @@ class S3fsMultiCurl
int Request(void);
};
//----------------------------------------------
// class AdditionalHeader
//----------------------------------------------
typedef std::list<int> charcnt_list_t;
typedef std::map<std::string, std::string> headerpair_t;
typedef std::map<std::string, headerpair_t> addheader_t;
class AdditionalHeader
{
private:
static AdditionalHeader singleton;
bool is_enable;
charcnt_list_t charcntlist;
addheader_t addheader;
public:
// Reference singleton
static AdditionalHeader* get(void) { return &singleton; }
AdditionalHeader();
~AdditionalHeader();
bool Load(const char* file);
void Unload(void);
bool AddHeader(headers_t& meta, const char* path) const;
struct curl_slist* AddHeader(struct curl_slist* list, const char* path) const;
bool Dump(void) const;
};
//----------------------------------------------
// Utility Functions
//----------------------------------------------
@ -433,6 +479,7 @@ std::string get_sorted_header_keys(const struct curl_slist* list);
std::string get_canonical_headers(const struct curl_slist* list, bool only_amz = false);
bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url);
std::string prepare_url(const char* url);
bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& ssevalue); // implement in s3fs.cpp
#endif // S3FS_CURL_H_

File diff suppressed because it is too large

View File

@ -20,6 +20,9 @@
#ifndef FD_CACHE_H_
#define FD_CACHE_H_
#include <sys/statvfs.h>
#include "curl.h"
//------------------------------------------------
// CacheFileStat
//------------------------------------------------
@ -34,8 +37,10 @@ class CacheFileStat
public:
static bool DeleteCacheFileStat(const char* path);
static bool CheckCacheFileStatTopDir(void);
static bool DeleteCacheFileStatDirectory(void);
CacheFileStat(const char* tpath = NULL);
explicit CacheFileStat(const char* tpath = NULL);
~CacheFileStat();
bool Open(void);
@ -52,40 +57,49 @@ struct fdpage
{
off_t offset;
size_t bytes;
bool init;
bool loaded;
fdpage(off_t start = 0, size_t size = 0, bool is_init = false)
: offset(start), bytes(size), init(is_init) {}
fdpage(off_t start = 0, size_t size = 0, bool is_loaded = false)
: offset(start), bytes(size), loaded(is_loaded) {}
off_t next(void) const { return (offset + bytes); }
off_t end(void) const { return (0 < bytes ? offset + bytes - 1 : 0); }
};
typedef std::list<struct fdpage*> fdpage_list_t;
class FdEntity;
//
// Management of loading area/modifying
//
class PageList
{
friend class FdEntity; // only one method access directly pages.
private:
fdpage_list_t pages;
private:
void Clear(void);
bool Compress(void);
bool Parse(off_t new_pos);
public:
static void FreeList(fdpage_list_t& list);
PageList(off_t size = 0, bool is_init = false);
explicit PageList(size_t size = 0, bool is_loaded = false);
~PageList();
off_t Size(void) const;
int Resize(off_t size, bool is_init);
int Init(off_t size, bool is_init);
bool IsInit(off_t start, off_t size);
bool SetInit(off_t start, off_t size, bool is_init = true);
bool FindUninitPage(off_t start, off_t& resstart, size_t& ressize);
int GetUninitPages(fdpage_list_t& uninit_list, off_t start = 0, off_t size = -1);
bool Init(size_t size, bool is_loaded);
size_t Size(void) const;
bool Resize(size_t size, bool is_loaded);
bool IsPageLoaded(off_t start = 0, size_t size = 0) const; // size=0 is checking to end of list
bool SetPageLoadedStatus(off_t start, size_t size, bool is_loaded = true, bool is_compress = true);
bool FindUnloadedPage(off_t start, off_t& resstart, size_t& ressize) const;
size_t GetTotalUnloadedPageSize(off_t start = 0, size_t size = 0) const; // size=0 is checking to end of list
int GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, size_t size = 0) const; // size=0 is checking to end of list
bool Serialize(CacheFileStat& file, bool is_output);
void Dump(void);
};
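PageList tracks which byte ranges of the local file already hold object data, so reads only download the ranges that are still missing. An illustrative sketch of the flow, assuming the declarations above (sizes are examples):

#include "fdcache.h"

void pagelist_example()
{
    PageList pages(10 * 1024 * 1024, false);   // 10MB object, nothing loaded yet

    off_t  start = 0;
    size_t size  = 0;
    if(pages.FindUnloadedPage(0, start, size)){
        // ... download [start, start + size) into the cache file ...
        pages.SetPageLoadedStatus(start, size, true);
    }
    size_t remaining = pages.GetTotalUnloadedPageSize();   // bytes still to fetch
    (void)remaining;
}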
@ -99,39 +113,61 @@ class FdEntity
pthread_mutex_t fdent_lock;
bool is_lock_init;
PageList pagelist;
int refcnt; // reference count
std::string path; // object path
std::string cachepath; // local cache file path
int fd; // file descriptor(tmp file or cache file)
FILE* file; // file pointer(tmp file or cache file)
bool is_modify; // if file is changed, this flag is true
int refcnt; // reference count
std::string path; // object path
std::string cachepath; // local cache file path
// (if this is empty, does not load/save pagelist.)
int fd; // file descriptor(tmp file or cache file)
FILE* pfile; // file pointer(tmp file or cache file)
bool is_modify; // if file is changed, this flag is true
headers_t orgmeta; // original headers at opening
size_t size_orgmeta; // original file size in original headers
std::string upload_id; // for no cached multipart uploading when no disk space
etaglist_t etaglist; // for no cached multipart uploading when no disk space
off_t mp_start; // start position for no cached multipart(write method only)
size_t mp_size; // size for no cached multipart(write method only)
private:
static int FillFile(int fd, unsigned char byte, size_t size, off_t start);
void Clear(void);
int Dup(void);
bool SetAllStatus(bool is_enable);
bool SetAllStatus(bool is_loaded); // [NOTE] not locking
//bool SetAllStatusLoaded(void) { return SetAllStatus(true); }
bool SetAllStatusUnloaded(void) { return SetAllStatus(false); }
public:
FdEntity(const char* tpath = NULL, const char* cpath = NULL);
explicit FdEntity(const char* tpath = NULL, const char* cpath = NULL);
~FdEntity();
void Close(void);
bool IsOpen(void) const { return (-1 != fd); }
int Open(off_t size = -1, time_t time = -1);
int Open(headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1);
bool OpenAndLoadAll(headers_t* pmeta = NULL, size_t* size = NULL, bool force_load = false);
int Dup(void);
const char* GetPath(void) const { return path.c_str(); }
void SetPath(const std::string &newpath) { path = newpath; }
int GetFd(void) const { return fd; }
int SetMtime(time_t time);
bool GetSize(off_t& size);
bool GetMtime(time_t& time);
bool GetStats(struct stat& st);
bool SetAllEnable(void) { return SetAllStatus(true); }
bool SetAllDisable(void) { return SetAllStatus(false); }
bool LoadFull(off_t* size = NULL, bool force_load = false);
int Load(off_t start, off_t size);
int RowFlush(const char* tpath, headers_t& meta, bool force_sync = false);
int Flush(headers_t& meta, bool force_sync = false) { return RowFlush(NULL, meta, force_sync); }
bool GetStats(struct stat& st);
int SetMtime(time_t time);
bool UpdateMtime(void);
bool GetSize(size_t& size);
bool SetMode(mode_t mode);
bool SetUId(uid_t uid);
bool SetGId(gid_t gid);
bool SetContentType(const char* path);
int Load(off_t start = 0, size_t size = 0); // size=0 means loading to end
int NoCacheLoadAndPost(off_t start = 0, size_t size = 0); // size=0 means loading to end
int NoCachePreMultipartPost(void);
int NoCacheMultipartPost(int tgfd, off_t start, size_t size);
int NoCacheCompleteMultipartPost(void);
int RowFlush(const char* tpath, bool force_sync = false);
int Flush(bool force_sync = false) { return RowFlush(NULL, force_sync); }
ssize_t Read(char* bytes, off_t start, size_t size, bool force_load = false);
ssize_t Write(const char* bytes, off_t start, size_t size);
};
@ -147,9 +183,12 @@ class FdManager
static pthread_mutex_t fd_manager_lock;
static bool is_lock_init;
static std::string cache_dir;
static size_t page_size;
static size_t free_disk_space; // limit free disk space
fdent_map_t fent;
private:
static fsblkcnt_t GetFreeDiskSpace(const char* path);
public:
FdManager();
@ -163,16 +202,21 @@ class FdManager
static bool SetCacheDir(const char* dir);
static bool IsCacheDir(void) { return (0 < FdManager::cache_dir.size()); }
static const char* GetCacheDir(void) { return FdManager::cache_dir.c_str(); }
static size_t SetPageSize(size_t size);
static size_t GetPageSize(void) { return FdManager::page_size; }
static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true);
static bool CheckCacheTopDir(void);
static bool MakeRandomTempPath(const char* path, std::string& tmppath);
static size_t GetEnsureFreeDiskSpace(void) { return FdManager::free_disk_space; }
static size_t SetEnsureFreeDiskSpace(size_t size);
static size_t InitEnsureFreeDiskSpace(void) { return SetEnsureFreeDiskSpace(0); }
static bool IsSafeDiskSpace(const char* path, size_t size);
FdEntity* GetFdEntity(const char* path, int existfd = -1);
FdEntity* Open(const char* path, off_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true);
FdEntity* ExistOpen(const char* path, int existfd = -1);
FdEntity* Open(const char* path, headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true);
FdEntity* ExistOpen(const char* path, int existfd = -1, bool ignore_existfd = false);
void Rename(const std::string &from, const std::string &to);
bool Close(FdEntity* ent);
bool ChangeEntityToTempPath(FdEntity* ent, const char* path);
};
#endif // FD_CACHE_H_
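GetFreeDiskSpace() is private here, so its body is not shown in this diff; a minimal sketch of how such a check is typically implemented with statvfs(3), whose header fdcache.h now includes (this is an assumption about the implementation, and the function name is hypothetical):

#include <sys/statvfs.h>

static fsblkcnt_t my_get_free_disk_space(const char* path)   // hypothetical name
{
    struct statvfs vfsbuf;
    if(-1 == statvfs(path, &vfsbuf)){
        return 0;   // treat errors as "no free space"
    }
    // blocks available to unprivileged users * fragment size
    return (vfsbuf.f_bavail * vfsbuf.f_frsize);
}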

View File

@ -217,7 +217,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
break;
}else if(-1 == bytes){
// error
DPRNNN("file read error(%d)", errno);
S3FS_PRN_ERR("file read error(%d)", errno);
return NULL;
}
md5_update(&ctx_md5, bytes, buf);
@ -261,7 +261,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
memset(buf, 0, 512);
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){
DPRNN("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
return NULL;
}
@ -273,7 +273,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
break;
}else if(-1 == bytes){
// error
DPRNNN("file read error(%d)", errno);
S3FS_PRN_ERR("file read error(%d)", errno);
return NULL;
}
gcry_md_write(ctx_md5, buf, bytes);
@ -344,7 +344,7 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
break;
}else if(-1 == bytes){
// error
DPRNNN("file read error(%d)", errno);
S3FS_PRN_ERR("file read error(%d)", errno);
return NULL;
}
sha256_update(&ctx_sha256, bytes, buf);
@ -375,7 +375,7 @@ bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char*
gcry_md_hd_t ctx_sha256;
gcry_error_t err;
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
DPRNN("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
free(*digest);
return false;
}
@ -409,7 +409,7 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
memset(buf, 0, 512);
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
DPRNN("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
return NULL;
}
@ -421,7 +421,7 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
break;
}else if(-1 == bytes){
// error
DPRNNN("file read error(%d)", errno);
S3FS_PRN_ERR("file read error(%d)", errno);
return NULL;
}
gcry_md_write(ctx_sha256, buf, bytes);

View File

@ -182,7 +182,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
break;
}else if(-1 == bytes){
// error
DPRNNN("file read error(%d)", errno);
S3FS_PRN_ERR("file read error(%d)", errno);
return NULL;
}
PK11_DigestOp(md5ctx, buf, bytes);
@ -262,7 +262,8 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
break;
}else if(-1 == bytes){
// error
DPRNNN("file read error(%d)", errno);
S3FS_PRN_ERR("file read error(%d)", errno);
PK11_DestroyContext(sha256ctx, PR_TRUE);
return NULL;
}
PK11_DigestOp(sha256ctx, buf, bytes);

View File

@ -105,7 +105,7 @@ static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int l
struct CRYPTO_dynlock_value* dyndata;
if(NULL == (dyndata = static_cast<struct CRYPTO_dynlock_value*>(malloc(sizeof(struct CRYPTO_dynlock_value))))){
DPRNCRIT("Could not allocate memory for CRYPTO_dynlock_value");
S3FS_PRN_CRIT("Could not allocate memory for CRYPTO_dynlock_value");
return NULL;
}
pthread_mutex_init(&(dyndata->dyn_mutex), NULL);
@ -134,14 +134,14 @@ static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, c
bool s3fs_init_crypt_mutex(void)
{
if(s3fs_crypt_mutex){
FPRNNN("s3fs_crypt_mutex is not NULL, destroy it.");
S3FS_PRN_DBG("s3fs_crypt_mutex is not NULL, destroy it.");
if(!s3fs_destroy_crypt_mutex()){
DPRN("Failed to s3fs_crypt_mutex");
S3FS_PRN_ERR("Failed to s3fs_crypt_mutex");
return false;
}
}
if(NULL == (s3fs_crypt_mutex = static_cast<pthread_mutex_t*>(malloc(CRYPTO_num_locks() * sizeof(pthread_mutex_t))))){
DPRNCRIT("Could not allocate memory for s3fs_crypt_mutex");
S3FS_PRN_CRIT("Could not allocate memory for s3fs_crypt_mutex");
return false;
}
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
@ -250,7 +250,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
break;
}else if(-1 == bytes){
// error
DPRNNN("file read error(%d)", errno);
S3FS_PRN_ERR("file read error(%d)", errno);
return NULL;
}
MD5_Update(&md5ctx, buf, bytes);
@ -297,10 +297,8 @@ bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char*
unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
{
const EVP_MD* md = EVP_get_digestbyname("sha256");
EVP_MD_CTX* sha256ctx = EVP_MD_CTX_create();
EVP_DigestInit_ex(sha256ctx, md, NULL);
const EVP_MD* md = EVP_get_digestbyname("sha256");
EVP_MD_CTX* sha256ctx;
char buf[512];
ssize_t bytes;
unsigned char* result;
@ -318,6 +316,9 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
return NULL;
}
sha256ctx = EVP_MD_CTX_create();
EVP_DigestInit_ex(sha256ctx, md, NULL);
memset(buf, 0, 512);
for(ssize_t total = 0; total < size; total += bytes){
bytes = 512 < (size - total) ? 512 : (size - total);
@ -327,13 +328,15 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
break;
}else if(-1 == bytes){
// error
DPRNNN("file read error(%d)", errno);
S3FS_PRN_ERR("file read error(%d)", errno);
EVP_MD_CTX_destroy(sha256ctx);
return NULL;
}
EVP_DigestUpdate(sha256ctx, buf, bytes);
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
EVP_MD_CTX_destroy(sha256ctx);
return NULL;
}
EVP_DigestFinal_ex(sha256ctx, result, NULL);

File diff suppressed because it is too large

View File

@ -84,8 +84,6 @@
#endif // HAVE_MALLOC_TRIM
char* get_object_sseckey_md5(const char* path);
#endif // S3FS_S3_H_
/*

View File

@ -20,14 +20,15 @@
#ifndef S3FS_AUTH_H_
#define S3FS_AUTH_H_
#include <string>
#include <sys/types.h>
//-------------------------------------------------------------------
// Utility functions for Authentication
//-------------------------------------------------------------------
//
// in common_auth.cpp
//
char* s3fs_base64(const unsigned char* input, size_t length);
unsigned char* s3fs_decode64(const char* input, size_t* plength);
std::string s3fs_get_content_md5(int fd);
std::string s3fs_md5sum(int fd, off_t start, ssize_t size);
std::string s3fs_sha256sum(int fd, off_t start, ssize_t size);


@ -60,6 +60,20 @@ string get_realpath(const char *path) {
return realpath;
}
inline headers_t::const_iterator find_content_type(headers_t& meta)
{
headers_t::const_iterator iter;
if(meta.end() == (iter = meta.find("Content-Type"))){
if(meta.end() == (iter = meta.find("Content-type"))){
if(meta.end() == (iter = meta.find("content-type"))){
iter = meta.find("content-Type");
}
}
}
return iter;
}
//-------------------------------------------------------------------
// Class S3ObjList
//-------------------------------------------------------------------
@ -233,7 +247,7 @@ bool S3ObjList::GetLastName(std::string& lastname) const
{
bool result = false;
lastname = "";
for(s3obj_t::const_iterator iter = objects.begin(); iter != objects.end(); iter++){
for(s3obj_t::const_iterator iter = objects.begin(); iter != objects.end(); ++iter){
if((*iter).second.orgname.length()){
if(0 > strcmp(lastname.c_str(), (*iter).second.orgname.c_str())){
lastname = (*iter).second.orgname;
@ -253,7 +267,7 @@ bool S3ObjList::GetNameList(s3obj_list_t& list, bool OnlyNormalized, bool CutSla
{
s3obj_t::const_iterator iter;
for(iter = objects.begin(); objects.end() != iter; iter++){
for(iter = objects.begin(); objects.end() != iter; ++iter){
if(OnlyNormalized && 0 != (*iter).second.normalname.length()){
continue;
}
@ -275,7 +289,7 @@ bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash)
s3obj_h_t::iterator hiter;
s3obj_list_t::const_iterator liter;
for(liter = list.begin(); list.end() != liter; liter++){
for(liter = list.begin(); list.end() != liter; ++liter){
string strtmp = (*liter);
if(1 < strtmp.length() && '/' == strtmp[strtmp.length() - 1]){
strtmp = strtmp.substr(0, strtmp.length() - 1);
@ -425,51 +439,14 @@ void free_mvnodes(MVNODE *head)
//-------------------------------------------------------------------
// Class AutoLock
//-------------------------------------------------------------------
AutoLock::AutoLock(pthread_mutex_t* pmutex) : auto_mutex(pmutex), is_locked(false)
AutoLock::AutoLock(pthread_mutex_t* pmutex) : auto_mutex(pmutex)
{
Lock();
pthread_mutex_lock(auto_mutex);
}
AutoLock::~AutoLock()
{
Unlock();
}
bool AutoLock::Lock(void)
{
if(!auto_mutex){
return false;
}
if(is_locked){
// already locked
return true;
}
try{
pthread_mutex_lock(auto_mutex);
is_locked = true;
}catch(exception& e){
is_locked = false;
return false;
}
return true;
}
bool AutoLock::Unlock(void)
{
if(!auto_mutex){
return false;
}
if(!is_locked){
// already unlocked
return true;
}
try{
pthread_mutex_unlock(auto_mutex);
is_locked = false;
}catch(exception& e){
return false;
}
return true;
pthread_mutex_unlock(auto_mutex);
}
//-------------------------------------------------------------------
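With Lock()/Unlock() removed, AutoLock becomes a plain RAII guard: the constructor takes the mutex and the destructor always releases it, so every return path in the guarded scope unlocks. A small usage sketch (the mutex and counter are hypothetical; AutoLock is declared in this tree's utility header as shown later in this diff):

#include <pthread.h>

static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;  // hypothetical mutex
static int counter = 0;                                           // hypothetical shared state

int increment_counter(void)
{
    AutoLock lock(&counter_lock);  // constructor locks
    return ++counter;              // destructor unlocks on every return path
}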
@ -479,7 +456,6 @@ bool AutoLock::Unlock(void)
string get_username(uid_t uid)
{
static size_t maxlen = 0; // set once
int result;
char* pbuf;
struct passwd pwinfo;
struct passwd* ppwinfo = NULL;
@ -488,19 +464,19 @@ string get_username(uid_t uid)
if(0 == maxlen){
long res = sysconf(_SC_GETPW_R_SIZE_MAX);
if(0 > res){
DPRNNN("could not get max pw length.");
S3FS_PRN_WARN("could not get max pw length.");
maxlen = 0;
return string("");
}
maxlen = res;
}
if(NULL == (pbuf = (char*)malloc(sizeof(char) * maxlen))){
DPRNCRIT("failed to allocate memory.");
S3FS_PRN_CRIT("failed to allocate memory.");
return string("");
}
// get group information
if(0 != (result = getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo))){
DPRNNN("could not get pw information.");
if(0 != getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo)){
S3FS_PRN_WARN("could not get pw information.");
free(pbuf);
return string("");
}
@ -526,19 +502,19 @@ int is_uid_inculde_group(uid_t uid, gid_t gid)
if(0 == maxlen){
long res = sysconf(_SC_GETGR_R_SIZE_MAX);
if(0 > res){
DPRNNN("could not get max name length.");
S3FS_PRN_ERR("could not get max name length.");
maxlen = 0;
return -ERANGE;
}
maxlen = res;
}
if(NULL == (pbuf = (char*)malloc(sizeof(char) * maxlen))){
DPRNCRIT("failed to allocate memory.");
S3FS_PRN_CRIT("failed to allocate memory.");
return -ENOMEM;
}
// get group information
if(0 != (result = getgrgid_r(gid, &ginfo, pbuf, maxlen, &pginfo))){
DPRNNN("could not get group information.");
S3FS_PRN_ERR("could not get group information.");
free(pbuf);
return -result;
}
@ -584,23 +560,80 @@ string mybasename(string path)
// mkdir --parents
int mkdirp(const string& path, mode_t mode)
{
string base;
string component;
string base;
string component;
stringstream ss(path);
while (getline(ss, component, '/')) {
base += "/" + component;
mkdir(base.c_str(), mode);
struct stat st;
if(0 == stat(base.c_str(), &st)){
if(!S_ISDIR(st.st_mode)){
return EPERM;
}
}else{
if(0 != mkdir(base.c_str(), mode)){
return errno;
}
}
}
return 0;
}
bool check_exist_dir_permission(const char* dirpath)
{
if(!dirpath || '\0' == dirpath[0]){
return false;
}
// exists
struct stat st;
if(0 != stat(dirpath, &st)){
if(ENOENT == errno){
// dir does not exist
return true;
}
if(EACCES == errno){
// could not access directory
return false;
}
// some other error occurred
return false;
}
// check type
if(!S_ISDIR(st.st_mode)){
// path is not directory
return false;
}
// check permission
uid_t myuid = geteuid();
if(myuid == st.st_uid){
if(S_IRWXU != (st.st_mode & S_IRWXU)){
return false;
}
}else{
if(1 == is_uid_inculde_group(myuid, st.st_gid)){
if(S_IRWXG != (st.st_mode & S_IRWXG)){
return false;
}
}else{
if(S_IRWXO != (st.st_mode & S_IRWXO)){
return false;
}
}
}
return true;
}
bool delete_files_in_dir(const char* dir, bool is_remove_own)
{
DIR* dp;
struct dirent* dent;
if(NULL == (dp = opendir(dir))){
DPRNINFO("could not open dir(%s) - errno(%d)", dir, errno);
S3FS_PRN_ERR("could not open dir(%s) - errno(%d)", dir, errno);
return false;
}
@ -613,20 +646,20 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own)
fullpath += dent->d_name;
struct stat st;
if(0 != lstat(fullpath.c_str(), &st)){
DPRN("could not get stats of file(%s) - errno(%d)", fullpath.c_str(), errno);
S3FS_PRN_ERR("could not get stats of file(%s) - errno(%d)", fullpath.c_str(), errno);
closedir(dp);
return false;
}
if(S_ISDIR(st.st_mode)){
// dir -> Reentrant
if(!delete_files_in_dir(fullpath.c_str(), true)){
DPRNINFO("could not remove sub dir(%s) - errno(%d)", fullpath.c_str(), errno);
S3FS_PRN_ERR("could not remove sub dir(%s) - errno(%d)", fullpath.c_str(), errno);
closedir(dp);
return false;
}
}else{
if(0 != unlink(fullpath.c_str())){
DPRN("could not remove file(%s) - errno(%d)", fullpath.c_str(), errno);
S3FS_PRN_ERR("could not remove file(%s) - errno(%d)", fullpath.c_str(), errno);
closedir(dp);
return false;
}
@ -635,7 +668,7 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own)
closedir(dp);
if(is_remove_own && 0 != rmdir(dir)){
DPRN("could not remove dir(%s) - errno(%d)", dir, errno);
S3FS_PRN_ERR("could not remove dir(%s) - errno(%d)", dir, errno);
return false;
}
return true;
@ -703,8 +736,13 @@ mode_t get_mode(headers_t& meta, const char* path, bool checkdir, bool forcedir)
if(forcedir){
mode |= S_IFDIR;
}else{
if(meta.end() != (iter = meta.find("Content-Type"))){
if(meta.end() != (iter = find_content_type(meta))){
string strConType = (*iter).second;
// Leave just the mime type, removing any optional parameters (e.g. charset)
string::size_type pos = strConType.find(";");
if(string::npos != pos){
strConType = strConType.substr(0, pos);
}
if(strConType == "application/x-directory"){
mode |= S_IFDIR;
}else if(path && 0 < strlen(path) && '/' == path[strlen(path) - 1]){
@ -826,7 +864,7 @@ bool is_need_check_obj_detail(headers_t& meta)
}
// if there is not Content-Type, or Content-Type is "x-directory",
// checking is no more.
if(meta.end() == (iter = meta.find("Content-Type"))){
if(meta.end() == (iter = find_content_type(meta))){
return false;
}
if("application/x-directory" == (*iter).second){
@ -875,28 +913,54 @@ void show_help (void)
" del_cache (delete local file cache)\n"
" - delete local file cache when s3fs starts and exits.\n"
"\n"
" use_rrs (default is disable)\n"
" - this option makes Amazon's Reduced Redundancy Storage enable.\n"
" storage_class (default=\"standard\")\n"
" - store object with specified storage class. Possible values:\n"
" standard, standard_ia, and reduced_redundancy.\n"
"\n"
" use_sse (default is disable)\n"
" - use Amazon's Server-Site Encryption or Server-Side Encryption\n"
" with Customer-Provided Encryption Keys.\n"
" this option can not be specified with use_rrs. specifying only \n"
" \"use_sse\" or \"use_sse=1\" enables Server-Side Encryption.\n"
" (use_sse=1 for old version)\n"
" specifying this option with file path which has some SSE-C\n"
" secret key enables Server-Side Encryption with Customer-Provided\n"
" Encryption Keys.(use_sse=file)\n"
" the file must be 600 permission. the file can have some lines,\n"
" each line is one SSE-C key. the first line in file is used as\n"
" Customer-Provided Encryption Keys for uploading and changing\n"
" headers etc.\n"
" if there are some keys after first line, those are used\n"
" downloading object which are encrypted by not first key.\n"
" so that, you can keep all SSE-C keys in file, that is SSE-C\n"
" key history.\n"
" if AWSSSECKEYS environment is set, you can set SSE-C key instead\n"
" - Specify one of three types of Amazon's Server-Side Encryption:\n"
" SSE-S3, SSE-C or SSE-KMS. SSE-S3 uses Amazon S3-managed\n"
" encryption keys, SSE-C uses customer-provided encryption keys,\n"
" and SSE-KMS uses a master key which you manage in AWS KMS.\n"
" Specifying \"use_sse\" or \"use_sse=1\" enables the SSE-S3 type\n"
" (use_sse=1 is the old style parameter).\n"
" For SSE-C, specify \"use_sse=custom\",\n"
" \"use_sse=custom:<custom key file path>\" or\n"
" \"use_sse=<custom key file path>\" (only <custom key file path>\n"
" is the old style parameter). You can use \"c\" as an\n"
" abbreviation for \"custom\".\n"
" The custom key file must have 600 permission. The file can\n"
" have several lines; each line is one SSE-C key. The first\n"
" line in the file is used as the customer-provided encryption\n"
" key for uploading and changing headers etc. Keys after the\n"
" first line are used for downloading objects that were\n"
" encrypted with a key other than the first one, so you can\n"
" keep all SSE-C keys in the file as a SSE-C key history.\n"
" If you specify \"custom\" (\"c\") without a file path, you need\n"
" to set the custom key with the load_sse_c option or the\n"
" AWSSSECKEYS environment variable. (AWSSSECKEYS holds SSE-C\n"
" keys separated by \":\".) This option decides the SSE type, so\n"
" if you do not want to encrypt objects at upload time but need\n"
" to decrypt encrypted objects at download time, use the\n"
" load_sse_c option instead of this option.\n"
" For SSE-KMS, specify \"use_sse=kmsid\" or\n"
" \"use_sse=kmsid:<kms id>\". You can use \"k\" as an abbreviation\n"
" for \"kmsid\". If you want to specify the SSE-KMS type with\n"
" your <kms id> in AWS KMS, set it after \"kmsid:\" (or \"k:\").\n"
" If you specify only \"kmsid\" (\"k\"), you need to set the\n"
" AWSSSEKMSID environment variable whose value is the <kms id>.\n"
" Be careful: you cannot use a KMS id that is not in the same\n"
" region as the EC2 instance.\n"
"\n"
" load_sse_c - specify SSE-C keys\n"
" Specify the path of a customer-provided encryption keys file\n"
" for decrypting at download time.\n"
" If you use a customer-provided encryption key at upload time,\n"
" specify it with \"use_sse=custom\". The file can have many\n"
" lines; each line is one custom key, so you can keep all SSE-C\n"
" keys in the file as a SSE-C key history. The AWSSSECKEYS\n"
" environment variable has the same format as this file's contents.\n"
"\n"
" public_bucket (default=\"\" which means disabled)\n"
" - anonymously mount a public bucket when set to 1\n"
@ -909,26 +973,28 @@ void show_help (void)
" file is the additional HTTP header by file(object) extension.\n"
" The configuration file format is below:\n"
" -----------\n"
" line = [file suffix] HTTP-header [HTTP-values]\n"
" line = [file suffix or regex] HTTP-header [HTTP-values]\n"
" file suffix = file(object) suffix, if this field is empty,\n"
" it means \"*\"(all object).\n"
" it means \"reg:(.*)\" (= all objects).\n"
" regex = regular expression to match the file(object) path.\n"
" this type starts with \"reg:\" prefix.\n"
" HTTP-header = additional HTTP header name\n"
" HTTP-values = additional HTTP header value\n"
" -----------\n"
" Sample:\n"
" -----------\n"
" .gz Content-Encoding gzip\n"
" .Z Content-Encoding compress\n"
" X-S3FS-MYHTTPHEAD myvalue\n"
" .gz Content-Encoding gzip\n"
" .Z Content-Encoding compress\n"
" reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2\n"
" -----------\n"
" A sample configuration file is uploaded in \"test\" directory.\n"
" If you use this option to set the \"Content-Encoding\" HTTP\n"
" header, please take care to follow RFC 2616.\n"
"\n"
" connect_timeout (default=\"10\" seconds)\n"
" connect_timeout (default=\"300\" seconds)\n"
" - time to wait for connection before giving up\n"
"\n"
" readwrite_timeout (default=\"30\" seconds)\n"
" readwrite_timeout (default=\"60\" seconds)\n"
" - time to wait between read/write activity before giving up\n"
"\n"
" max_stat_cache_size (default=\"1000\" entries (about 4MB))\n"
@ -948,7 +1014,8 @@ void show_help (void)
" in stat cache that the object(file or directory) does not exist.\n"
"\n"
" no_check_certificate\n"
" - server certificate won't be checked against the available certificate authorities.\n"
" - server certificate won't be checked against the available \n"
" certificate authorities.\n"
"\n"
" nodnscache (disable dns cache)\n"
" - s3fs is always using dns cache, this option make dns cache disable.\n"
@ -971,13 +1038,15 @@ void show_help (void)
" multipart_size (default=\"10\")\n"
" - part size, in MB, for each multipart request.\n"
"\n"
" fd_page_size (default=\"52428800\"(50MB))\n"
" - number of internal management page size for each file descriptor.\n"
" For delayed reading and writing by s3fs, s3fs manages pages which \n"
" is separated from object. Each pages has a status that data is \n"
" already loaded(or not loaded yet).\n"
" This option should not be changed when you don't have a trouble \n"
" with performance.\n"
" ensure_diskfree (default is the same as the multipart_size value)\n"
" - the amount of disk space, in MB, to keep free. s3fs creates\n"
" files for downloading, uploading and caching. If the free disk\n"
" space is smaller than this value, s3fs avoids using disk space\n"
" as much as possible, in exchange for performance.\n"
"\n"
" singlepart_copy_limit (default=\"5120\")\n"
" - maximum size, in MB, of a single-part copy before trying \n"
" multipart copy.\n"
"\n"
" url (default=\"http://s3.amazonaws.com\")\n"
" - sets the url to use to access amazon s3\n"
@ -1006,11 +1075,13 @@ void show_help (void)
" nomultipart (disable multipart uploads)\n"
"\n"
" enable_content_md5 (default is disable)\n"
" - verifying uploaded object without multipart by content-md5 header.\n"
" - ensure data integrity during writes with MD5 hash.\n"
"\n"
" iam_role (default is no role)\n"
" - set the IAM Role that will supply the credentials from the \n"
" instance meta-data.\n"
" iam_role (default is no IAM role)\n"
" - This option takes an IAM role name or \"auto\". If you specify\n"
" \"auto\", s3fs automatically uses the IAM role name that is set\n"
" on the instance. Specifying this option without an argument is\n"
" the same as specifying \"auto\".\n"
"\n"
" noxmlns (disable registering xml name space)\n"
" disable registering xml name space for response of \n"
@ -1040,6 +1111,22 @@ void show_help (void)
" the virtual-host request style, by using the older path request\n"
" style.\n"
"\n"
" noua (suppress User-Agent header)\n"
" Usually s3fs sends the User-Agent header in \"s3fs/<version>\n"
" (commit hash <hash>; <using ssl library name>)\" format.\n"
" If this option is specified, s3fs suppresses sending the\n"
" User-Agent header.\n"
"\n"
" dbglevel (default=\"crit\")\n"
" Set the debug message level. Set the value to crit (critical),\n"
" err (error), warn (warning) or info (information). The default\n"
" debug level is critical. If s3fs runs with the \"-d\" option,\n"
" the debug level is set to information. When s3fs catches the\n"
" signal SIGUSR2, the debug level is bumped up.\n"
"\n"
" curldbg - output curl debug messages\n"
" Outputs debug messages from libcurl when this option is specified.\n"
"\n"
"FUSE/mount Options:\n"
"\n"
" Most of the generic mount options described in 'man mount' are\n"
@ -1062,8 +1149,7 @@ void show_help (void)
" disable multi-threaded operation\n"
"\n"
"\n"
"Report bugs to <s3fs-devel@googlegroups.com>\n"
"s3fs home page: <http://code.google.com/p/s3fs/>\n"
"s3fs home page: <https://github.com/s3fs-fuse/s3fs-fuse>\n"
);
return;
}
@ -1071,12 +1157,12 @@ void show_help (void)
void show_version(void)
{
printf(
"Amazon Simple Storage Service File System V%s with %s\n"
"Amazon Simple Storage Service File System V%s(commit:%s) with %s\n"
"Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>\n"
"License GPL2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>\n"
"This is free software: you are free to change and redistribute it.\n"
"There is NO WARRANTY, to the extent permitted by law.\n",
VERSION, s3fs_crypt_lib_name());
VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name());
return;
}
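The help text in this diff documents several new option forms (storage_class, the use_sse variants, iam_role=auto, dbglevel). A few hedged example invocations; the bucket name, mountpoint and key file path are hypothetical:

# Store new objects in STANDARD_IA:
s3fs mybucket /mnt/mybucket -o storage_class=standard_ia

# SSE-C with a 600-permission key file; the first line is the upload key:
s3fs mybucket /mnt/mybucket -o use_sse=custom:/etc/s3fs/ssec.keys

# Credentials from the instance's IAM role, discovered automatically,
# with warning-level debug messages:
s3fs mybucket /mnt/mybucket -o iam_role=auto -o dbglevel=warn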


@ -88,14 +88,10 @@ class AutoLock
{
private:
pthread_mutex_t* auto_mutex;
bool is_locked;
public:
AutoLock(pthread_mutex_t* pmutex = NULL);
explicit AutoLock(pthread_mutex_t* pmutex);
~AutoLock();
bool Lock(void);
bool Unlock(void);
};
//-------------------------------------------------------------------
@ -113,6 +109,7 @@ int is_uid_inculde_group(uid_t uid, gid_t gid);
std::string mydirname(std::string path);
std::string mybasename(std::string path);
int mkdirp(const std::string& path, mode_t mode);
bool check_exist_dir_permission(const char* dirpath);
bool delete_files_in_dir(const char* dir, bool is_remove_own);
time_t get_mtime(const char *s);
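check_exist_dir_permission() returns true when the directory either does not exist yet (so it can be created) or exists with full access for the effective user, and false for a non-directory path or insufficient permissions. A hedged usage sketch (the cache path is hypothetical, and the header name is assumed from this diff):

#include <cstdio>
#include <cstdlib>
#include "s3fs_util.h"   // assumed header declaring check_exist_dir_permission in this tree

int main()
{
    const char* cache_dir = "/var/cache/s3fs";  // hypothetical path
    // true: absent (creatable) or present with full access for the effective user
    // false: present but not a directory, or permissions are insufficient
    if(!check_exist_dir_permission(cache_dir)){
        fprintf(stderr, "%s cannot be used as a cache directory\n", cache_dir);
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}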


@ -17,6 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@ -85,13 +86,6 @@ string lower(string s)
return s;
}
string IntToStr(int n)
{
stringstream result;
result << n;
return result.str();
}
string trim_left(const string &s, const string &t /* = SPACES */)
{
string d(s);
@ -275,6 +269,104 @@ string get_date_iso8601(time_t tm)
return buf;
}
std::string s3fs_hex(const unsigned char* input, size_t length)
{
std::string hex;
for(size_t pos = 0; pos < length; ++pos){
char hexbuf[3];
snprintf(hexbuf, 3, "%02x", input[pos]);
hex += hexbuf;
}
return hex;
}
char* s3fs_base64(const unsigned char* input, size_t length)
{
static const char* base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
char* result;
if(!input || 0 >= length){
return NULL;
}
if(NULL == (result = (char*)malloc((((length / 3) + 1) * 4 + 1) * sizeof(char)))){
return NULL; // ENOMEM
}
unsigned char parts[4];
size_t rpos;
size_t wpos;
for(rpos = 0, wpos = 0; rpos < length; rpos += 3){
parts[0] = (input[rpos] & 0xfc) >> 2;
parts[1] = ((input[rpos] & 0x03) << 4) | ((((rpos + 1) < length ? input[rpos + 1] : 0x00) & 0xf0) >> 4);
parts[2] = (rpos + 1) < length ? (((input[rpos + 1] & 0x0f) << 2) | ((((rpos + 2) < length ? input[rpos + 2] : 0x00) & 0xc0) >> 6)) : 0x40;
parts[3] = (rpos + 2) < length ? (input[rpos + 2] & 0x3f) : 0x40;
result[wpos++] = base[parts[0]];
result[wpos++] = base[parts[1]];
result[wpos++] = base[parts[2]];
result[wpos++] = base[parts[3]];
}
result[wpos] = '\0';
return result;
}
inline unsigned char char_decode64(const char ch)
{
unsigned char by;
if('A' <= ch && ch <= 'Z'){ // A - Z
by = static_cast<unsigned char>(ch - 'A');
}else if('a' <= ch && ch <= 'z'){ // a - z
by = static_cast<unsigned char>(ch - 'a' + 26);
}else if('0' <= ch && ch <= '9'){ // 0 - 9
by = static_cast<unsigned char>(ch - '0' + 52);
}else if('+' == ch){ // +
by = 62;
}else if('/' == ch){ // /
by = 63;
}else if('=' == ch){ // =
by = 64;
}else{ // something wrong
by = UCHAR_MAX;
}
return by;
}
unsigned char* s3fs_decode64(const char* input, size_t* plength)
{
unsigned char* result;
if(!input || 0 == strlen(input) || !plength){
return NULL;
}
if(NULL == (result = (unsigned char*)malloc((strlen(input) + 1)))){
return NULL; // ENOMEM
}
unsigned char parts[4];
size_t input_len = strlen(input);
size_t rpos;
size_t wpos;
for(rpos = 0, wpos = 0; rpos < input_len; rpos += 4){
parts[0] = char_decode64(input[rpos]);
parts[1] = (rpos + 1) < input_len ? char_decode64(input[rpos + 1]) : 64;
parts[2] = (rpos + 2) < input_len ? char_decode64(input[rpos + 2]) : 64;
parts[3] = (rpos + 3) < input_len ? char_decode64(input[rpos + 3]) : 64;
result[wpos++] = ((parts[0] << 2) & 0xfc) | ((parts[1] >> 4) & 0x03);
if(64 == parts[2]){
break;
}
result[wpos++] = ((parts[1] << 4) & 0xf0) | ((parts[2] >> 2) & 0x0f);
if(64 == parts[3]){
break;
}
result[wpos++] = ((parts[2] << 6) & 0xc0) | (parts[3] & 0x3f);
}
result[wpos] = '\0';
*plength = wpos;
return result;
}
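s3fs_base64() pads with '=' (table index 64) and s3fs_decode64() stops at the first padding character, so the pair round-trips arbitrary byte strings; both return malloc'd buffers the caller must free. A small sketch exercising the two functions as declared in this diff (the input literal is an arbitrary example):

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "string_util.h"   // declares s3fs_base64 / s3fs_decode64 in this tree

int main()
{
    const char* plain = "1234";   // arbitrary example input
    char* encoded = s3fs_base64(reinterpret_cast<const unsigned char*>(plain), strlen(plain));
    if(!encoded){
        return 1;
    }
    size_t declen = 0;
    unsigned char* decoded = s3fs_decode64(encoded, &declen);
    if(!decoded){
        free(encoded);
        return 1;
    }
    // Expect: 1234 -> MTIzNA== -> 1234 (4 bytes), matching the unit tests below.
    printf("%s -> %s -> %.*s (%zu bytes)\n", plain, encoded,
           static_cast<int>(declen), decoded, declen);
    free(encoded);                // both results are malloc'd by the callees
    free(decoded);
    return 0;
}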
/*
* Local variables:
* tab-width: 4


@ -45,7 +45,6 @@ std::string trim_left(const std::string &s, const std::string &t = SPACES);
std::string trim_right(const std::string &s, const std::string &t = SPACES);
std::string trim(const std::string &s, const std::string &t = SPACES);
std::string lower(std::string s);
std::string IntToStr(int);
std::string get_date_rfc850(void);
void get_date_sigv3(std::string& date, std::string& date8601);
std::string get_date_string(time_t tm);
@ -56,6 +55,10 @@ std::string urlDecode(const std::string& s);
bool takeout_str_dquart(std::string& str);
bool get_keyword_value(std::string& target, const char* keyword, std::string& value);
std::string s3fs_hex(const unsigned char* input, size_t length);
char* s3fs_base64(const unsigned char* input, size_t length);
unsigned char* s3fs_decode64(const char* input, size_t* plength);
#endif // S3FS_STRING_UTIL_H_
/*


@ -18,12 +18,14 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <limits>
#include <stdint.h>
#include <string>
#include "string_util.h"
#include "test_util.h"
int main(int argc, char *argv[])
void test_trim()
{
ASSERT_EQUALS(std::string("1234"), trim(" 1234 "));
ASSERT_EQUALS(std::string("1234"), trim("1234 "));
@ -40,5 +42,42 @@ int main(int argc, char *argv[])
ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234"));
ASSERT_EQUALS(std::string("1234"), trim_right("1234"));
ASSERT_EQUALS(std::string("0"), str(0));
ASSERT_EQUALS(std::string("1"), str(1));
ASSERT_EQUALS(std::string("-1"), str(-1));
ASSERT_EQUALS(std::string("9223372036854775807"), str(std::numeric_limits<int64_t>::max()));
ASSERT_EQUALS(std::string("-9223372036854775808"), str(std::numeric_limits<int64_t>::min()));
ASSERT_EQUALS(std::string("0"), str(std::numeric_limits<uint64_t>::min()));
ASSERT_EQUALS(std::string("18446744073709551615"), str(std::numeric_limits<uint64_t>::max()));
}
void test_base64()
{
size_t len;
ASSERT_STREQUALS(s3fs_base64(NULL, 0), NULL);
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64(NULL, &len)), NULL);
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>(""), 0), NULL);
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("", &len)), NULL);
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1"), 1), "MQ==");
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MQ==", &len)), "1");
ASSERT_EQUALS(len, static_cast<size_t>(1));
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("12"), 2), "MTI=");
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTI=", &len)), "12");
ASSERT_EQUALS(len, static_cast<size_t>(2));
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("123"), 3), "MTIz");
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTIz", &len)), "123");
ASSERT_EQUALS(len, static_cast<size_t>(3));
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1234"), 4), "MTIzNA==");
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTIzNA==", &len)), "1234");
ASSERT_EQUALS(len, static_cast<size_t>(4));
// TODO: invalid input
}
int main(int argc, char *argv[])
{
test_trim();
test_base64();
return 0;
}


@ -29,6 +29,18 @@ template <typename T> void assert_equals(const T &x, const T &y, const char *fil
}
}
void assert_strequals(const char *x, const char *y, const char *file, int line)
{
if(x == NULL && y == NULL){
return;
} else if((x == NULL || y == NULL) || strcmp(x, y) != 0){
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
std::exit(1);
}
}
#define ASSERT_EQUALS(x, y) \
assert_equals((x), (y), __FILE__, __LINE__)
#define ASSERT_STREQUALS(x, y) \
assert_strequals((x), (y), __FILE__, __LINE__)


@ -28,8 +28,3 @@ EXTRA_DIST = \
sample_ahbe.conf
testdir = test
test_PROGRAMS=rename_before_close
rename_before_close_SOURCES = rename_before_close.c


@ -1,11 +1,57 @@
#!/bin/bash -e
#!/bin/bash
#
# Common code for starting an s3fs-fuse mountpoint and an S3Proxy instance
# to run tests against S3Proxy locally.
#
# To run against an Amazon S3 or other S3 provider, specify the following
# environment variables:
#
# S3FS_CREDENTIALS_FILE=keyfile s3fs format key file
# TEST_BUCKET_1=bucketname Name of bucket to use
# S3PROXY_BINARY="" Specify empty string to skip S3Proxy start
# S3_URL="http://s3.amazonaws.com" Specify Amazon AWS as the S3 provider
#
# Example of running against Amazon S3 using a bucket named "bucket":
#
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="http://s3.amazonaws.com" ./small-integration-test.sh
#
# To change the s3fs-fuse debug level:
#
# DBGLEVEL=debug ./small-integration-test.sh
#
# To stop and wait after the mount point is up, for manual interaction. This allows you to
# explore the mounted file system exactly as it would have been started for the test case:
#
# INTERACT=1 DBGLEVEL=debug ./small-integration-test.sh
#
# Run all of the tests from the makefile
#
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="http://s3.amazonaws.com" make check
#
# Run the tests with request auth turned off in both S3Proxy and s3fs-fuse. This can be
# useful for poking around with plain old curl
#
# PUBLIC=1 INTERACT=1 ./small-integration-test.sh
#
# A valgrind tool can be specified
# eg: VALGRIND="--tool=memcheck --leak-check=full" ./small-integration-test.sh
set -o errexit
S3FS=../src/s3fs
S3FS_CREDENTIALS_FILE="passwd-s3fs"
# Allow these defaulted values to be overridden
: ${S3_URL:="http://127.0.0.1:8080"}
: ${S3FS_CREDENTIALS_FILE:="passwd-s3fs"}
: ${TEST_BUCKET_1:="s3fs-integration-test"}
TEST_BUCKET_1="s3fs-integration-test"
TEST_BUCKET_MOUNT_POINT_1=${TEST_BUCKET_1}
export TEST_BUCKET_1
export S3_URL
export TEST_SCRIPT_DIR=`pwd`
export TEST_BUCKET_MOUNT_POINT_1=${TEST_BUCKET_1}
S3PROXY_VERSION="1.4.0"
S3PROXY_BINARY=${S3PROXY_BINARY-"s3proxy-${S3PROXY_VERSION}"}
if [ ! -f "$S3FS_CREDENTIALS_FILE" ]
then
@ -14,10 +60,159 @@ then
fi
chmod 600 "$S3FS_CREDENTIALS_FILE"
S3PROXY_VERSION="1.4.0"
S3PROXY_BINARY="s3proxy-${S3PROXY_VERSION}"
if [ ! -e "${S3PROXY_BINARY}" ]; then
wget "https://github.com/andrewgaul/s3proxy/releases/download/s3proxy-${S3PROXY_VERSION}/s3proxy" \
-O "${S3PROXY_BINARY}"
chmod +x "${S3PROXY_BINARY}"
if [ ! -d $TEST_BUCKET_MOUNT_POINT_1 ]
then
mkdir -p $TEST_BUCKET_MOUNT_POINT_1
fi
# This function executes its remaining arguments as a command up to $1
# times before giving up, with a 1 second delay between attempts.
function retry {
set +o errexit
N=$1; shift;
status=0
for i in $(seq $N); do
echo "Trying: $@"
$@
status=$?
if [ $status == 0 ]; then
break
fi
sleep 1
echo "Retrying: $@"
done
if [ $status != 0 ]; then
echo "timeout waiting for $@"
fi
set -o errexit
return $status
}
# Proxy is not started if S3PROXY_BINARY is an empty string
# PUBLIC unset: use s3proxy.conf
# PUBLIC=1: use s3proxy-noauth.conf (no request signing)
#
function start_s3proxy {
if [ -n "${PUBLIC}" ]; then
S3PROXY_CONFIG="s3proxy-noauth.conf"
else
S3PROXY_CONFIG="s3proxy.conf"
fi
if [ -n "${S3PROXY_BINARY}" ]
then
if [ ! -e "${S3PROXY_BINARY}" ]; then
wget "https://github.com/andrewgaul/s3proxy/releases/download/s3proxy-${S3PROXY_VERSION}/s3proxy" \
--quiet -O "${S3PROXY_BINARY}"
chmod +x "${S3PROXY_BINARY}"
fi
stdbuf -oL -eL java -jar "$S3PROXY_BINARY" --properties $S3PROXY_CONFIG | stdbuf -oL -eL sed -u "s/^/s3proxy: /" &
# wait for S3Proxy to start
for i in $(seq 30);
do
if exec 3<>"/dev/tcp/127.0.0.1/8080";
then
exec 3<&- # Close for read
exec 3>&- # Close for write
break
fi
sleep 1
done
S3PROXY_PID=$(netstat -lpnt | grep :8080 | awk '{ print $7 }' | sed -u 's|/java||')
fi
}
function stop_s3proxy {
if [ -n "${S3PROXY_PID}" ]
then
kill $S3PROXY_PID
wait $S3PROXY_PID
fi
}
# Mount the bucket, function arguments passed to s3fs in addition to
# a set of common arguments.
function start_s3fs {
# Public bucket if PUBLIC is set
if [ -n "${PUBLIC}" ]; then
AUTH_OPT="-o public_bucket=1"
else
AUTH_OPT="-o passwd_file=${S3FS_CREDENTIALS_FILE}"
fi
# If VALGRIND is set, run s3fs under valgrind with those options and log
# to a valgrind-listener started in another shell (default port is 1500).
# eg: VALGRIND="--tool=memcheck --leak-check=full" ./small-integration-test.sh
if [ -n "${VALGRIND}" ]; then
VALGRIND_EXEC="valgrind ${VALGRIND} --log-socket=127.0.1.1"
fi
# Common s3fs options:
#
# TODO: Allow all these options to be overridden with env variables
#
# sigv2
# Historically because S3Proxy only supports sigv2.
# use_path_request_style
# The test env doesn't have virtual hosts
# createbucket
# S3Proxy always starts with no buckets; this tests the s3fs-fuse
# automatic bucket creation path.
# $AUTH_OPT
# Will be either "-o public_bucket=1"
# or
# "-o passwd_file=${S3FS_CREDENTIALS_FILE}"
# dbglevel
# info by default. override with DBGLEVEL env variable
# -f
# Keep s3fs in foreground instead of daemonizing
#
# subshell with set -x to log exact invocation of s3fs-fuse
(
set -x
stdbuf -oL -eL \
${VALGRIND_EXEC} ${S3FS} \
$TEST_BUCKET_1 \
$TEST_BUCKET_MOUNT_POINT_1 \
-o sigv2 \
-o use_path_request_style \
-o url=${S3_URL} \
-o createbucket \
${AUTH_OPT} \
-o dbglevel=${DBGLEVEL:=info} \
-f \
${@} \
|& stdbuf -oL -eL sed -u "s/^/s3fs: /" &
)
retry 5 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts || exit 1
# Quick way to start system up for manual testing with options under test
if [[ -n ${INTERACT} ]]; then
echo "Mountpoint $TEST_BUCKET_MOUNT_POINT_1 is ready"
echo "control-C to quit"
sleep infinity
exit 0
fi
}
function stop_s3fs {
# Retry in case file system is in use
if grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts; then
retry 10 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts && fusermount -u $TEST_BUCKET_MOUNT_POINT_1
fi
}
# trap handlers do not stack. If a test sets its own, the new handler should call common_exit_handler
function common_exit_handler {
stop_s3proxy
stop_s3fs
}
trap common_exit_handler EXIT
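As the comment above notes, EXIT traps replace each other rather than stack, so a test script that needs its own cleanup should chain to common_exit_handler explicitly. A hedged sketch (my_cleanup is a hypothetical function):

function test_exit_handler {
    my_cleanup               # hypothetical per-test cleanup
    common_exit_handler      # still stop S3Proxy and unmount s3fs
}
trap test_exit_handler EXIT  # replaces, not augments, the common trap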


@ -1,344 +1,421 @@
#!/bin/bash
set -o xtrace
set -o errexit
COMMON=integration-test-common.sh
source $COMMON
source test-utils.sh
# Configuration
TEST_TEXT="HELLO WORLD"
TEST_TEXT_FILE=test-s3fs.txt
TEST_DIR=testdir
ALT_TEST_TEXT_FILE=test-s3fs-ALT.txt
TEST_TEXT_FILE_LENGTH=15
BIG_FILE=big-file-s3fs.txt
BIG_FILE_LENGTH=$((25 * 1024 * 1024))
function test_append_file {
describe "Testing append to file ..."
# Write a small test file
for x in `seq 1 $TEST_TEXT_FILE_LENGTH`
do
echo "echo ${TEST_TEXT} to ${TEST_TEXT_FILE}"
done > ${TEST_TEXT_FILE}
function mk_test_file {
if [ $# == 0 ]; then
TEXT=$TEST_TEXT
else
TEXT=$1
fi
echo $TEXT > $TEST_TEXT_FILE
if [ ! -e $TEST_TEXT_FILE ]
# Verify contents of file
echo "Verifying length of test file"
FILE_LENGTH=`wc -l $TEST_TEXT_FILE | awk '{print $1}'`
if [ "$FILE_LENGTH" -ne "$TEST_TEXT_FILE_LENGTH" ]
then
echo "Could not create file ${TEST_TEXT_FILE}, it does not exist"
exit 1
echo "error: expected $TEST_TEXT_FILE_LENGTH , got $FILE_LENGTH"
return 1
fi
rm_test_file
}
function rm_test_file {
if [ $# == 0 ]; then
FILE=$TEST_TEXT_FILE
else
FILE=$1
fi
rm -f $FILE
function test_truncate_file {
describe "Testing truncate file ..."
# Write a small test file
echo "${TEST_TEXT}" > ${TEST_TEXT_FILE}
if [ -e $FILE ]
# Truncate file to 0 length. This should trigger open(path, O_RDWR | O_TRUNC...)
: > ${TEST_TEXT_FILE}
# Verify file is zero length
if [ -s ${TEST_TEXT_FILE} ]
then
echo "Could not cleanup file ${TEST_TEXT_FILE}"
exit 1
echo "error: expected ${TEST_TEXT_FILE} to be zero length"
return 1
fi
rm_test_file
}
function mk_test_dir {
mkdir ${TEST_DIR}
function test_truncate_empty_file {
echo "Testing truncate empty file ..."
# Write an empty test file
touch ${TEST_TEXT_FILE}
if [ ! -d ${TEST_DIR} ]; then
echo "Directory ${TEST_DIR} was not created"
exit 1
# Truncate the file to 1024 length
t_size=1024
truncate ${TEST_TEXT_FILE} -s $t_size
# Verify file is the new length
size=$(stat -c %s ${TEST_TEXT_FILE})
if [ $t_size -ne $size ]
then
echo "error: expected ${TEST_TEXT_FILE} to be $t_size length, got $size"
return 1
fi
rm_test_file
}
function rm_test_dir {
rmdir ${TEST_DIR}
function test_mv_file {
describe "Testing mv file function ..."
# if the rename file exists, delete it
if [ -e $ALT_TEST_TEXT_FILE ]
then
rm $ALT_TEST_TEXT_FILE
fi
if [ -e $ALT_TEST_TEXT_FILE ]
then
echo "Could not delete file ${ALT_TEST_TEXT_FILE}, it still exists"
return 1
fi
# create the test file again
mk_test_file
#rename the test file
mv $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
if [ ! -e $ALT_TEST_TEXT_FILE ]
then
echo "Could not move file"
return 1
fi
# Check the contents of the alt file
ALT_TEXT_LENGTH=`echo $TEST_TEXT | wc -c | awk '{print $1}'`
ALT_FILE_LENGTH=`wc -c $ALT_TEST_TEXT_FILE | awk '{print $1}'`
if [ "$ALT_FILE_LENGTH" -ne "$ALT_TEXT_LENGTH" ]
then
echo "moved file length is not as expected. expected: $ALT_TEXT_LENGTH got: $ALT_FILE_LENGTH"
return 1
fi
# clean up
rm_test_file $ALT_TEST_TEXT_FILE
}
function test_mv_directory {
describe "Testing mv directory function ..."
if [ -e $TEST_DIR ]; then
echo "Could not remove the test directory, it still exists: ${TEST_DIR}"
exit 1
echo "Unexpected, this file/directory exists: ${TEST_DIR}"
return 1
fi
mk_test_dir
mv ${TEST_DIR} ${TEST_DIR}_rename
if [ ! -d "${TEST_DIR}_rename" ]; then
echo "Directory ${TEST_DIR} was not renamed"
return 1
fi
rmdir ${TEST_DIR}_rename
if [ -e "${TEST_DIR}_rename" ]; then
echo "Could not remove the test directory, it still exists: ${TEST_DIR}_rename"
return 1
fi
}
CUR_DIR=`pwd`
TEST_BUCKET_MOUNT_POINT_1=$1
if [ "$TEST_BUCKET_MOUNT_POINT_1" == "" ]; then
echo "Mountpoint missing"
exit 1
fi
cd $TEST_BUCKET_MOUNT_POINT_1
function test_redirects {
describe "Testing redirects ..."
if [ -e $TEST_TEXT_FILE ]
then
rm -f $TEST_TEXT_FILE
fi
mk_test_file ABCDEF
# Write a small test file
for x in `seq 1 $TEST_TEXT_FILE_LENGTH`
do
echo "echo ${TEST_TEXT} to ${TEST_TEXT_FILE}"
echo $TEST_TEXT >> $TEST_TEXT_FILE
done
CONTENT=`cat $TEST_TEXT_FILE`
# Verify contents of file
echo "Verifying length of test file"
FILE_LENGTH=`wc -l $TEST_TEXT_FILE | awk '{print $1}'`
if [ "$FILE_LENGTH" -ne "$TEST_TEXT_FILE_LENGTH" ]
then
echo "error: expected $TEST_TEXT_FILE_LENGTH , got $FILE_LENGTH"
exit 1
fi
if [ "${CONTENT}" != "ABCDEF" ]; then
echo "CONTENT read is unexpected, got ${CONTENT}, expected ABCDEF"
return 1
fi
rm_test_file
echo XYZ > $TEST_TEXT_FILE
##########################################################
# Rename test (individual file)
##########################################################
echo "Testing mv file function ..."
CONTENT=`cat $TEST_TEXT_FILE`
# if the rename file exists, delete it
if [ -e $ALT_TEST_TEXT_FILE ]
then
rm $ALT_TEST_TEXT_FILE
fi
if [ ${CONTENT} != "XYZ" ]; then
echo "CONTENT read is unexpected, got ${CONTENT}, expected XYZ"
return 1
fi
if [ -e $ALT_TEST_TEXT_FILE ]
then
echo "Could not delete file ${ALT_TEST_TEXT_FILE}, it still exists"
exit 1
fi
echo 123456 >> $TEST_TEXT_FILE
# create the test file again
mk_test_file
LINE1=`sed -n '1,1p' $TEST_TEXT_FILE`
LINE2=`sed -n '2,2p' $TEST_TEXT_FILE`
#rename the test file
mv $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
if [ ! -e $ALT_TEST_TEXT_FILE ]
then
echo "Could not move file"
exit 1
fi
if [ ${LINE1} != "XYZ" ]; then
echo "LINE1 was not as expected, got ${LINE1}, expected XYZ"
return 1
fi
# Check the contents of the alt file
ALT_TEXT_LENGTH=`echo $TEST_TEXT | wc -c | awk '{print $1}'`
ALT_FILE_LENGTH=`wc -c $ALT_TEST_TEXT_FILE | awk '{print $1}'`
if [ "$ALT_FILE_LENGTH" -ne "$ALT_TEXT_LENGTH" ]
then
echo "moved file length is not as expected expected: $ALT_TEXT_LENGTH got: $ALT_FILE_LENGTH"
exit 1
fi
if [ ${LINE2} != "123456" ]; then
echo "LINE2 was not as expected, got ${LINE2}, expected 123456"
return 1
fi
# clean up
rm_test_file $ALT_TEST_TEXT_FILE
# clean up
rm_test_file
}
##########################################################
# Rename test (individual directory)
##########################################################
echo "Testing mv directory function ..."
if [ -e $TEST_DIR ]; then
echo "Unexpected, this file/directory exists: ${TEST_DIR}"
exit 1
fi
function test_mkdir_rmdir {
describe "Testing creation/removal of a directory"
mk_test_dir
if [ -e $TEST_DIR ]; then
echo "Unexpected, this file/directory exists: ${TEST_DIR}"
return 1
fi
mv ${TEST_DIR} ${TEST_DIR}_rename
mk_test_dir
rm_test_dir
}
if [ ! -d "${TEST_DIR}_rename" ]; then
echo "Directory ${TEST_DIR} was not renamed"
exit 1
fi
function test_chmod {
describe "Testing chmod file function ..."
rmdir ${TEST_DIR}_rename
if [ -e "${TEST_DIR}_rename" ]; then
echo "Could not remove the test directory, it still exists: ${TEST_DIR}_rename"
exit 1
fi
# create the test file again
mk_test_file
###################################################################
# test redirects > and >>
###################################################################
echo "Testing redirects ..."
ORIGINAL_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
mk_test_file ABCDEF
chmod 777 $TEST_TEXT_FILE;
CONTENT=`cat $TEST_TEXT_FILE`
# if they're the same, we have a problem.
if [ $(stat --format=%a $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
then
echo "Could not modify $TEST_TEXT_FILE permissions"
return 1
fi
if [ ${CONTENT} != "ABCDEF" ]; then
echo "CONTENT read is unexpected, got ${CONTENT}, expected ABCDEF"
exit 1
fi
# clean up
rm_test_file
}
echo XYZ > $TEST_TEXT_FILE
function test_chown {
describe "Testing chown file function ..."
CONTENT=`cat $TEST_TEXT_FILE`
# create the test file again
mk_test_file
if [ ${CONTENT} != "XYZ" ]; then
echo "CONTENT read is unexpected, got ${CONTENT}, expected XYZ"
exit 1
fi
ORIGINAL_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
echo 123456 >> $TEST_TEXT_FILE
chown 1000:1000 $TEST_TEXT_FILE;
LINE1=`sed -n '1,1p' $TEST_TEXT_FILE`
LINE2=`sed -n '2,2p' $TEST_TEXT_FILE`
# if they're the same, we have a problem.
if [ $(stat --format=%u:%g $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
then
echo "Could not modify $TEST_TEXT_FILE ownership"
return 1
fi
if [ ${LINE1} != "XYZ" ]; then
echo "LINE1 was not as expected, got ${LINE1}, expected XYZ"
exit 1
fi
# clean up
rm_test_file
}
if [ ${LINE2} != "123456" ]; then
echo "LINE2 was not as expected, got ${LINE2}, expected 123456"
exit 1
fi
function test_list {
describe "Testing list"
mk_test_file
mk_test_dir
file_cnt=$(ls -1 | wc -l)
if [ $file_cnt != 2 ]; then
echo "Expected 2 files but got $file_cnt"
return 1
fi
rm_test_file
rm_test_dir
}
function test_remove_nonempty_directory {
describe "Testing removing a non-empty directory"
mk_test_dir
touch "${TEST_DIR}/file"
rmdir "${TEST_DIR}" 2>&1 | grep -q "Directory not empty"
rm "${TEST_DIR}/file"
rm_test_dir
}
function test_rename_before_close {
describe "Testing rename before close ..."
(
echo foo
mv $TEST_TEXT_FILE ${TEST_TEXT_FILE}.new
) > $TEST_TEXT_FILE
if ! cmp <(echo foo) ${TEST_TEXT_FILE}.new; then
echo "rename before close failed"
return 1
fi
rm_test_file ${TEST_TEXT_FILE}.new
rm -f ${TEST_TEXT_FILE}
}
function test_multipart_upload {
describe "Testing multi-part upload ..."
dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
# Verify contents of file
echo "Comparing test file"
if ! cmp "/tmp/${BIG_FILE}" "${BIG_FILE}"
then
return 1
fi
rm -f "/tmp/${BIG_FILE}"
rm_test_file "${BIG_FILE}"
}
function test_multipart_copy {
describe "Testing multi-part copy ..."
dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
mv "${BIG_FILE}" "${BIG_FILE}-copy"
# Verify contents of file
echo "Comparing test file"
if ! cmp "/tmp/${BIG_FILE}" "${BIG_FILE}-copy"
then
return 1
fi
rm -f "/tmp/${BIG_FILE}"
rm_test_file "${BIG_FILE}-copy"
}
function test_special_characters {
describe "Testing special characters ..."
ls 'special' 2>&1 | grep -q 'No such file or directory'
ls 'special?' 2>&1 | grep -q 'No such file or directory'
ls 'special*' 2>&1 | grep -q 'No such file or directory'
ls 'special~' 2>&1 | grep -q 'No such file or directory'
ls 'specialµ' 2>&1 | grep -q 'No such file or directory'
}
function test_symlink {
describe "Testing symlinks ..."
rm -f $TEST_TEXT_FILE
rm -f $ALT_TEST_TEXT_FILE
echo foo > $TEST_TEXT_FILE
ln -s $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
cmp $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
rm -f $TEST_TEXT_FILE
[ -L $ALT_TEST_TEXT_FILE ]
[ ! -f $ALT_TEST_TEXT_FILE ]
}
function test_extended_attributes {
command -v setfattr >/dev/null 2>&1 || \
{ echo "Skipping extended attribute tests" ; return; }
describe "Testing extended attributes ..."
rm -f $TEST_TEXT_FILE
touch $TEST_TEXT_FILE
# set value
setfattr -n key1 -v value1 $TEST_TEXT_FILE
getfattr -n key1 --only-values $TEST_TEXT_FILE | grep -q '^value1$'
# append value
setfattr -n key2 -v value2 $TEST_TEXT_FILE
getfattr -n key1 --only-values $TEST_TEXT_FILE | grep -q '^value1$'
getfattr -n key2 --only-values $TEST_TEXT_FILE | grep -q '^value2$'
# remove value
setfattr -x key1 $TEST_TEXT_FILE
! getfattr -n key1 --only-values $TEST_TEXT_FILE
getfattr -n key2 --only-values $TEST_TEXT_FILE | grep -q '^value2$'
}
function test_mtime_file {
describe "Testing mtime preservation function ..."
# if the rename file exists, delete it
if [ -e $ALT_TEST_TEXT_FILE -o -L $ALT_TEST_TEXT_FILE ]
then
rm $ALT_TEST_TEXT_FILE
fi
if [ -e $ALT_TEST_TEXT_FILE ]
then
echo "Could not delete file ${ALT_TEST_TEXT_FILE}, it still exists"
return 1
fi
# create the test file again
mk_test_file
sleep 2 # allow for some time to pass to compare the timestamps between test & alt
#copy the test file with preserve mode
cp -p $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
testmtime=`stat -c %Y $TEST_TEXT_FILE`
altmtime=`stat -c %Y $ALT_TEST_TEXT_FILE`
if [ "$testmtime" -ne "$altmtime" ]
then
echo "File times do not match: $testmtime != $altmtime"
return 1
fi
}
function test_rm_rf_dir {
describe "Test that rm -rf will remove directory with contents"
# Create a dir with some files and directories
mkdir dir1
mkdir dir1/dir2
touch dir1/file1
touch dir1/dir2/file2
# Remove the dir with recursive rm
rm -rf dir1
if [ -e dir1 ]; then
echo "rm -rf did not remove $PWD/dir1"
return 1
fi
}
function test_write_after_seek_ahead {
describe "Test writes succeed after a seek ahead"
dd if=/dev/zero of=testfile seek=1 count=1 bs=1024
rm testfile
}
# clean up
rm_test_file
function add_all_tests {
add_tests test_append_file
add_tests test_truncate_file
add_tests test_truncate_empty_file
add_tests test_mv_file
add_tests test_mv_directory
add_tests test_redirects
add_tests test_mkdir_rmdir
add_tests test_chmod
add_tests test_chown
add_tests test_list
add_tests test_remove_nonempty_directory
# TODO: broken: https://github.com/s3fs-fuse/s3fs-fuse/issues/145
#add_tests test_rename_before_close
add_tests test_multipart_upload
# TODO: test disabled until S3Proxy 1.5.0 is released
#add_tests test_multipart_copy
add_tests test_special_characters
add_tests test_symlink
add_tests test_extended_attributes
add_tests test_rm_rf_dir
add_tests test_write_after_seek_ahead
}
#####################################################################
# Simple directory test mkdir/rmdir
#####################################################################
echo "Testing creation/removal of a directory"
if [ -e $TEST_DIR ]; then
echo "Unexpected, this file/directory exists: ${TEST_DIR}"
exit 1
fi
mk_test_dir
rm_test_dir
##########################################################
# File permissions test (individual file)
##########################################################
echo "Testing chmod file function ..."
# create the test file again
mk_test_file
ORIGINAL_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
chmod 777 $TEST_TEXT_FILE;
# if they're the same, we have a problem.
if [ $(stat --format=%a $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
then
echo "Could not modify $TEST_TEXT_FILE permissions"
exit 1
fi
# clean up
rm_test_file
##########################################################
# File permissions test (individual file)
##########################################################
echo "Testing chown file function ..."
# create the test file again
mk_test_file
ORIGINAL_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
chown 1000:1000 $TEST_TEXT_FILE;
# if they're the same, we have a problem.
if [ $(stat --format=%a $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
then
echo "Could not modify $TEST_TEXT_FILE ownership"
exit 1
fi
# clean up
rm_test_file
##########################################################
# Testing list
##########################################################
echo "Testing list"
mk_test_file
mk_test_dir
file_cnt=$(ls -1 | wc -l)
if [ $file_cnt != 2 ]; then
echo "Expected 2 file but got $file_cnt"
exit 1
fi
rm_test_file
rm_test_dir
##########################################################
# Testing rename before close
##########################################################
if false; then
echo "Testing rename before close ..."
$CUR_DIR/rename_before_close $TEST_TEXT_FILE
if [ $? != 0 ]; then
echo "rename before close failed"
exit 1
fi
# clean up
rm_test_file
fi
##########################################################
# Testing multi-part upload
##########################################################
echo "Testing multi-part upload ..."
dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
# Verify contents of file
echo "Comparing test file"
if ! cmp "/tmp/${BIG_FILE}" "${BIG_FILE}"
then
exit 1
fi
rm -f "/tmp/${BIG_FILE}"
rm -f "${BIG_FILE}"
##########################################################
# Testing special characters
##########################################################
echo "Testing special characters ..."
ls 'special' 2>&1 | grep -q 'No such file or directory'
ls 'special?' 2>&1 | grep -q 'No such file or directory'
ls 'special*' 2>&1 | grep -q 'No such file or directory'
ls 'special~' 2>&1 | grep -q 'No such file or directory'
ls 'specialµ' 2>&1 | grep -q 'No such file or directory'
##########################################################
# Testing extended attributes
##########################################################
rm -f $TEST_TEXT_FILE
touch $TEST_TEXT_FILE
# set value
setfattr -n key1 -v value1 $TEST_TEXT_FILE
getfattr -n key1 --only-values $TEST_TEXT_FILE | grep -q '^value1$'
# append value
setfattr -n key2 -v value2 $TEST_TEXT_FILE
getfattr -n key1 --only-values $TEST_TEXT_FILE | grep -q '^value1$'
getfattr -n key2 --only-values $TEST_TEXT_FILE | grep -q '^value2$'
# remove value
setfattr -x key1 $TEST_TEXT_FILE
! getfattr -n key1 --only-values $TEST_TEXT_FILE
getfattr -n key2 --only-values $TEST_TEXT_FILE | grep -q '^value2$'
#####################################################################
# Tests are finished
#####################################################################
# Unmount the bucket
cd $CUR_DIR
echo "All tests complete."
init_suite
add_all_tests
run_suite
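Under this harness a test case is a bash function that returns non-zero on failure; it is registered with add_tests and executed by run_suite inside a per-run temp directory on the mount. A hedged sketch of adding one more case (test_touch is hypothetical):

function test_touch {
    describe "Testing that touch creates an empty file ..."
    touch ${TEST_TEXT_FILE}
    if [ ! -f ${TEST_TEXT_FILE} ]; then
        echo "error: ${TEST_TEXT_FILE} was not created"
        return 1
    fi
    rm_test_file
}

# and inside add_all_tests:
#   add_tests test_touch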


@ -1,88 +0,0 @@
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
static const char FILE_CONTENT[] = "XXXX";
#define PROG "rename_before_close"
static char *
filename_to_mkstemp_template(const char *file)
{
size_t len = strlen(file);
static const char suffix[] = ".XXXXXX";
size_t new_len = len + sizeof(suffix);
char *ret_str = calloc(1, new_len);
int ret = snprintf(ret_str, new_len, "%s%s", file, suffix);
assert(ret == new_len - 1);
assert(ret_str[new_len] == '\0');
return ret_str;
}
static off_t
get_file_size(const char *file)
{
struct stat ss;
printf(PROG ": stat(%s)\n", file);
int ret = lstat(file, &ss);
assert(ret == 0);
return ss.st_size;
}
static void
test_rename_before_close(const char *file)
{
char *template = filename_to_mkstemp_template(file);
printf(PROG ": mkstemp(%s)\n", template);
int fd = mkstemp(template);
assert(fd >= 0);
sleep(1);
printf(PROG ": write(%s)\n", template);
int ret = write(fd, FILE_CONTENT, sizeof(FILE_CONTENT));
assert(ret == sizeof(FILE_CONTENT));
sleep(1);
printf(PROG ": fsync(%s)\n", template);
ret = fsync(fd);
assert(ret == 0);
sleep(1);
assert(get_file_size(template) == sizeof(FILE_CONTENT));
sleep(1);
printf(PROG ": rename(%s, %s)\n", template, file);
ret = rename(template, file);
assert(ret == 0);
sleep(1);
printf(PROG ": close(%s)\n", file);
ret = close(fd);
assert(ret == 0);
sleep(1);
assert(get_file_size(file) == sizeof(FILE_CONTENT));
}
int
main(int argc, char *argv[])
{
setvbuf(stdout, NULL, _IONBF, 0);
if (argc < 2) {
printf("Usage: %s <file>", argv[0]);
return 1;
}
test_rename_before_close(argv[1]);
return 0;
}


@ -4,21 +4,27 @@
# s3fs loads this file at starting.
#
# Format:
# line = [file suffix] HTTP-header [HTTP-header-values]
# line = [file suffix or regex] HTTP-header [HTTP-header-values]
# file suffix = file(object) suffix, if this field is empty,
# it means "*"(all object).
# it means "reg:(.*)" (= all objects).
# regex = regular expression to match the file(object) path.
# this type starts with "reg:" prefix.
# HTTP-header = additional HTTP header name
# HTTP-header-values = additional HTTP header value
#
# <suffix(extension)> <HTTP header> <HTTP header values>
#
# Verification is done in the order in which they are described in the file.
# That order is very important.
#
# Example:
# " Content-Encoding gzip" --> all object
# ".gz Content-Encoding gzip" --> only ".gz" extension file
# " Content-Encoding gzip" --> all object
# ".gz Content-Encoding gzip" --> only ".gz" extension file
# "reg:^/DIR/(.*).t2$ Content-Encoding text2" --> "/DIR/*.t2" extension file
#
# Notice:
# If you need to set all object, you can specify without "suffix".
# Then all of object(file) is added additional header.
# If you need to match all objects, you can specify an empty "suffix" or the
# regex type "reg:(.*)". Then the additional header is added to all objects (files).
# If you have this configuration file for Content-Encoding, you should
# know about RFC 2616.
#
@ -27,15 +33,20 @@
# Encoding header, and SHOULD NOT be used in the Content-Encoding
# header."
#
.gz Content-Encoding gzip
.Z Content-Encoding compress
.bz2 Content-Encoding bzip2
.svgz Content-Encoding gzip
.svg.gz Content-Encoding gzip
.tgz Content-Encoding gzip
.tar.gz Content-Encoding gzip
.taz Content-Encoding gzip
.tz Content-Encoding gzip
.tbz2 Content-Encoding gzip
gz.js Content-Encoding gzip
# file suffix type
.gz Content-Encoding gzip
.Z Content-Encoding compress
.bz2 Content-Encoding bzip2
.svgz Content-Encoding gzip
.svg.gz Content-Encoding gzip
.tgz Content-Encoding gzip
.tar.gz Content-Encoding gzip
.taz Content-Encoding gzip
.tz Content-Encoding gzip
.tbz2 Content-Encoding gzip
gz.js Content-Encoding gzip
# regex type(test)
reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2
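A hedged way to confirm a rule applied, using the no-auth S3Proxy setup from the test scripts in this change (the bucket, mountpoint and file names are made up):

s3fs mybucket /mnt/mybucket -o ahbe_conf=sample_ahbe.conf -o public_bucket=1 \
    -o url=http://127.0.0.1:8080 -o use_path_request_style
mkdir -p /mnt/mybucket/MYDIR
echo hello > /mnt/mybucket/MYDIR/file.t2
curl -sI http://127.0.0.1:8080/mybucket/MYDIR/file.t2 | grep -i '^Content-Encoding'
# expected, if the regex rule above matched: Content-Encoding: text2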


@ -1,69 +1,30 @@
#!/bin/bash
set -o xtrace
#
# Test s3fs-fuse file system operations with
#
set -o errexit
# Require root
REQUIRE_ROOT=require-root.sh
#source $REQUIRE_ROOT
source integration-test-common.sh
function retry {
set +o errexit
N=$1; shift;
status=0
for i in $(seq $N); do
$@
status=$?
if [ $status == 0 ]; then
break
fi
sleep 1
done
start_s3proxy
if [ $status != 0 ]; then
echo "timeout waiting for $@"
fi
set -o errexit
return $status
}
#
# enable_content_md5
# Causes s3fs to validate file contents. This isn't included in the common
# options used by start_s3fs because tests may be performance tests
# singlepart_copy_limit
# Appeared in upstream s3fs-fuse tests, possibly a limitation of S3Proxy
# TODO: github archaeology to see why it was added.
#
start_s3fs -o enable_content_md5 \
-o singlepart_copy_limit=$((10 * 1024))
function exit_handler {
kill $S3PROXY_PID
retry 30 fusermount -u $TEST_BUCKET_MOUNT_POINT_1
}
trap exit_handler EXIT
./integration-test-main.sh
stdbuf -oL -eL java -jar "$S3PROXY_BINARY" --properties s3proxy.conf | stdbuf -oL -eL sed -u "s/^/s3proxy: /" &
# wait for S3Proxy to start
for i in $(seq 30);
do
if exec 3<>"/dev/tcp/localhost/8080";
then
exec 3<&- # Close for read
exec 3>&- # Close for write
break
fi
sleep 1
done
S3PROXY_PID=$(netstat -lpnt | grep :8080 | awk '{ print $7 }' | sed -u 's|/java||')
# Mount the bucket
if [ ! -d $TEST_BUCKET_MOUNT_POINT_1 ]
then
mkdir -p $TEST_BUCKET_MOUNT_POINT_1
fi
stdbuf -oL -eL $S3FS $TEST_BUCKET_1 $TEST_BUCKET_MOUNT_POINT_1 \
-o createbucket \
-o passwd_file=$S3FS_CREDENTIALS_FILE \
-o sigv2 \
-o url=http://127.0.0.1:8080 \
-o use_path_request_style -f -o f2 -d -d |& stdbuf -oL -eL sed -u "s/^/s3fs: /" &
retry 30 grep $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts || exit 1
./integration-test-main.sh $TEST_BUCKET_MOUNT_POINT_1
echo "All tests complete."
echo "$0: tests complete."

test/test-utils.sh (new file)

@ -0,0 +1,156 @@
#### Test utils
set -o errexit
# Configuration
TEST_TEXT="HELLO WORLD"
TEST_TEXT_FILE=test-s3fs.txt
TEST_DIR=testdir
ALT_TEST_TEXT_FILE=test-s3fs-ALT.txt
TEST_TEXT_FILE_LENGTH=15
BIG_FILE=big-file-s3fs.txt
BIG_FILE_LENGTH=$((25 * 1024 * 1024))
export RUN_DIR
function mk_test_file {
if [ $# == 0 ]; then
TEXT=$TEST_TEXT
else
TEXT=$1
fi
echo $TEXT > $TEST_TEXT_FILE
if [ ! -e $TEST_TEXT_FILE ]
then
echo "Could not create file ${TEST_TEXT_FILE}, it does not exist"
exit 1
fi
}
function rm_test_file {
if [ $# == 0 ]; then
FILE=$TEST_TEXT_FILE
else
FILE=$1
fi
rm -f $FILE
if [ -e $FILE ]
then
echo "Could not cleanup file ${FILE}"
exit 1
fi
}
function mk_test_dir {
mkdir ${TEST_DIR}
if [ ! -d ${TEST_DIR} ]; then
echo "Directory ${TEST_DIR} was not created"
exit 1
fi
}
function rm_test_dir {
rmdir ${TEST_DIR}
if [ -e $TEST_DIR ]; then
echo "Could not remove the test directory, it still exists: ${TEST_DIR}"
exit 1
fi
}
# Create and cd to a unique directory for this test run
# Sets RUN_DIR to the name of the created directory
function cd_run_dir {
if [ "$TEST_BUCKET_MOUNT_POINT_1" == "" ]; then
echo "TEST_BUCKET_MOUNT_POINT_1 variable not set"
exit 1
fi
RUN_DIR=$(mktemp --directory ${TEST_BUCKET_MOUNT_POINT_1}/testrun-XXXXXX)
cd ${RUN_DIR}
}
function clean_run_dir {
if [ -d ${RUN_DIR} ]; then
rm -rf ${RUN_DIR} || echo "Error removing ${RUN_DIR}"
fi
}
# Resets test suite
function init_suite {
TEST_LIST=()
TEST_FAILED_LIST=()
TEST_PASSED_LIST=()
}
# Report a passing test case
# report_pass TEST_NAME
function report_pass {
echo "$1 passed"
TEST_PASSED_LIST+=($1)
}
# Report a failing test case
# report_fail TEST_NAME
function report_fail {
echo "$1 failed"
TEST_FAILED_LIST+=($1)
}
# Add tests to the suite
# add_tests TEST_NAME...
function add_tests {
TEST_LIST+=("$@")
}
# Log test name and description
# describe [DESCRIPTION]
function describe {
echo "${FUNCNAME[1]}: "$@""
}
# Runs each test in a suite and summarizes results. The list of
# tests added by add_tests() is called with CWD set to a tmp
# directory in the bucket. An attempt to clean this directory is
# made after the test run.
function run_suite {
orig_dir=$PWD
cd_run_dir
for t in "${TEST_LIST[@]}"; do
# The following sequence runs tests in a subshell to allow continuation
# on test failure, but still allowing errexit to be in effect during
# the test.
#
# See:
# https://groups.google.com/d/msg/gnu.bash.bug/NCK_0GmIv2M/dkeZ9MFhPOIJ
# Other ways of trying to capture the return value will also disable
# errexit in the function due to bash... compliance with POSIX?
set +o errexit
(set -o errexit; $t)
if [[ $? == 0 ]]; then
report_pass $t
else
report_fail $t
fi
set -o errexit
done
cd ${orig_dir}
clean_run_dir
for t in "${TEST_PASSED_LIST[@]}"; do
echo "PASS: $t"
done
for t in "${TEST_FAILED_LIST[@]}"; do
echo "FAIL: $t"
done
passed=${#TEST_PASSED_LIST[@]}
failed=${#TEST_FAILED_LIST[@]}
echo "SUMMARY for $0: $passed tests passed. $failed tests failed."
if [[ $failed != 0 ]]; then
return 1
else
return 0
fi
}
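The subshell construction in run_suite deserves a standalone note: putting a function on the left of || or testing it directly in an if disables errexit inside the function, while running it in a (set -o errexit; ...) subshell keeps errexit live for the test body and still lets the parent loop continue. A minimal demonstration of the pattern (do_work is hypothetical):

#!/bin/bash
set -o errexit

function do_work {
    false                   # errexit aborts the subshell here
    echo "never reached"
}

set +o errexit              # let the parent survive the failure
(set -o errexit; do_work)
if [[ $? == 0 ]]; then
    echo "do_work passed"
else
    echo "do_work failed"   # this branch runs
fi
set -o errexit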