223 Commits

Author SHA1 Message Date
b56f9d349c Temporary test error workaround in Ubuntu 25.10 2025-10-23 02:38:13 +09:00
9da9db069c Change CI test macos from macos-13 to macos-14 2025-10-20 06:23:32 +09:00
11a362939f Temporary handling of atime/ctime in test for Ubuntu 25.10 2025-10-16 06:50:04 +09:00
49ab488f88 Add missing parallelism to memory tests (#2744) 2025-10-14 19:13:50 +09:00
5cce8a4ceb Upgrade CI to Fedora 43 (#2743) 2025-10-14 19:06:45 +09:00
b147c66c1b Fix typos (#2742) 2025-10-14 19:04:34 +09:00
423683825a Add missing diffutils for openSUSE Leap CI, as 16.0 misses cmp otherwise 2025-10-14 14:19:12 +09:00
f364450dfc Add openSUSE Leap 16.0 to the CI 2025-10-14 14:19:12 +09:00
b2e318c5c7 Added a flag to prevent stats cache expiration checks 2025-10-14 01:40:03 +09:00
52b263b99c Improved handling of XML parser errors 2025-10-13 13:36:40 +09:00
a9b9631c5c Fixed to not call xmlReadMemory if data length is 0 2025-10-12 04:18:08 +09:00
2cb869dfd2 The Truncate method of the StatCache class has been consolidated (#2729) 2025-10-11 02:13:49 +09:00
52835103f1 Upgrade CI to Ubuntu 25.10 2025-10-10 02:27:52 +09:00
ba386a8d7a Changed StatCache code and made small performance improvements 2025-10-10 02:10:00 +09:00
c719e36f91 Fixed negative stat cache data not working 2025-10-10 02:09:19 +09:00
735fe9352a Fixed Stat cache expire check processing (#2708) 2025-10-04 02:20:07 +09:00
4513e4f700 Use rockylinux/rockylinux instead of rockylinux (#2734)
The latter is stuck on 9.3 and 8.9 instead of the latest 9.6 and 8.10:

https://hub.docker.com/_/rockylinux

Co-authored-by: Takeshi Nakatani <ggtakec@gmail.com>
2025-10-04 00:59:14 +09:00
37e593aeb0 Changed file stat times(a/c/mtime) management 2025-10-03 13:59:41 +09:00
60fb557f14 Improved stat cache efficiency by only registering instead of deleting 2025-09-11 17:20:40 +09:00
b7b5a108c2 Separate pjdfstest tests by command to make the log units smaller 2025-09-09 05:38:44 +09:00
0bf901eff7 Fixed test_external_modification test for MacOS 2025-09-02 05:20:04 +09:00
8408af8695 Replaced the free call with std::string 2025-08-31 02:34:36 +09:00
29c9e50f4e Remove unused aws-cli in alpine (#2727) 2025-08-31 00:00:49 +09:00
8cf28d71b8 Consolidate dnf calls for static-checks (#2726) 2025-08-31 00:00:10 +09:00
709cdfc604 Check integrity of downloaded binaries (#2723)
Usually package managers perform this check but we need to do it
ourselves for custom binaries.
2025-08-30 23:59:22 +09:00
eaa2a90a56 Deduplicate CentOS 9 and 10 (#2718)
EPEL is no longer required.
2025-08-30 23:56:49 +09:00
f1e836c725 Only install clang in MemoryTest task (#2719) 2025-08-30 23:55:57 +09:00
ff2080a39e Centralize C++ version in Makefiles (#2713) 2025-08-30 16:49:51 +09:00
1366f582b1 Replace sstream header with iosfwd in headers (#2712)
The latter is smaller and sufficient for parameters.
2025-08-30 16:47:00 +09:00
03583f3424 Upgrade to S3Proxy 2.7.0 (#2656)
Release notes:
https://github.com/gaul/s3proxy/releases/tag/s3proxy-2.6.0
https://github.com/gaul/s3proxy/releases/tag/s3proxy-2.7.0
2025-08-30 16:45:02 +09:00
fcaacd5397 Remove ut_test.py (#2722)
pjdfstest supersedes this.
2025-08-30 16:37:46 +09:00
4b46b7a811 Only install binaries in static-checks task (#2717)
Also remove outdated version checks.
2025-08-30 16:34:26 +09:00
e55c37ddab Clean up Valgrind installation (#2716) 2025-08-30 16:18:03 +09:00
0448ff460b Add S3 operation performance counters (#2715)
These can be used to evaluate changes like #2707.  Ideally tests could
assert how many operations they expect although this will require a
localhost HTTP server.
2025-08-30 16:13:30 +09:00
28771e5757 Changed to use rename when serializing to FileCacheStat 2025-08-30 01:21:13 +09:00
da17cace4f Fixed test_update_time_chown in test (#2720) 2025-08-29 03:11:18 +09:00
87d7a5822e Use curl instead of AWS CLI (#2689)
The latter starts up significantly slower which impedes integration
test times.  curl has some limitations, e.g., no SSE support.
2025-08-28 08:36:07 +09:00
e8b5a4109a Remove unused pip dependency (#2711)
df7bbb28d5 removed the use of this.
Also convert string to an array to reduce merge conflicts and
alphabetize packages for readability.
2025-08-27 08:25:38 +09:00
bae0facba3 Changed to serialize CacheFileStat after flushing a file 2025-08-26 16:54:39 +09:00
ecdcb4a836 Fixed unnecessary conversion to in DirStatCache::TruncateCacheHasLock (#2706) 2025-08-22 19:57:36 +09:00
066a2f8fa6 Added DirStatCache::GetChildLeafNameHasLock method 2025-08-22 06:48:06 +09:00
629207791e Remove unused S3FS_PTHREAD_ERRORCHECK (#2703) 2025-08-21 20:40:54 +09:00
b411e40d6b Add Debian trixie (#2702) 2025-08-21 20:36:17 +09:00
b1b9fb55d9 Use junk_data instead of urandom (#2700)
The latter seems to block on macOS sometimes.
2025-08-21 20:34:30 +09:00
666fea3d26 Remove unneeded x-prefix in comparisons
Found via shellcheck.  Reference:

https://www.shellcheck.net/wiki/SC2268
2025-08-21 06:11:03 +09:00
7112471a80 small spelling fix: 'no' to 'not' 2025-08-03 01:36:10 +09:00
50bb76f968 Remove S3FS_MALLOC_TRIM (#2699)
42b74c9d2e introduced this flag but it
is not clear that this behavior is required with recent libxml2
version.
2025-08-01 10:47:58 +09:00
c78517d410 Convert #if defined to #ifdef where possible (#2698)
Suggested by clang-tidy.
2025-08-01 10:43:44 +09:00
57b5d367f2 Deduplicate case-insensitive functors (#2697) 2025-07-29 22:28:06 +09:00
41ef4b6495 Convert s3fs_log_level to a strong enum (#2695) 2025-07-29 22:26:53 +09:00
0c559778bb Fixed typos in PR #2681 2025-07-29 10:53:24 +09:00
5a2a7ca4db Refactor to change StatCache class and add StatCacheNode classes 2025-07-25 03:45:27 +09:00
8faebbc7fc Add Rocky Linux 10 to CI (#2688) 2025-06-29 11:44:18 +09:00
8d68b8a03c Refactor to move functions and their declarations to appropriate files 2025-06-27 15:28:44 +09:00
97659c41f2 Fixed bugs in removing xattrs func and test for it 2025-06-27 03:16:32 +09:00
b624596685 Simplify temporary file creation via mktemp (#2691)
Previously this used /dev/urandom which does not guarantee uniqueness
and sometimes blocked on macOS.  References #2690.
2025-06-26 19:05:16 +09:00
be28fbc7b8 Refactor StatCache words from NoObject to Negative 2025-06-10 10:36:48 +09:00
778059279b Upgrade CI to Alpine 3.22 2025-06-09 21:11:34 +09:00
5bc46ff1ba Removed the RUN_DIR variable which is no longer global 2025-06-09 21:10:42 +09:00
47231fc5fb Disable new S3 checksums for S3Proxy compatibility (#2686)
References aws/aws-cli#9214.
2025-06-09 19:31:46 +09:00
f1a954cbcb Refactor StatCache truncate processing 2025-06-08 23:51:31 +09:00
c620262d3d Fixed s3fs_flush to update pending meta for macos and nomultipart mode 2025-06-08 22:05:50 +09:00
63402bb556 Fixed the random string generation in test script for macos 2025-06-08 22:03:56 +09:00
c869b3996f Improve error handling (#2671)
Found via C++17 [[nodiscard]].
2025-06-07 14:58:06 +09:00
0e5bccc20b Simplify temporary file handling in CI scripts (#2680) 2025-06-07 14:56:32 +09:00
872f53d35a Remove Ubuntu 20.04 from CI (#2677)
This is EOL:
https://ubuntu.com/blog/ubuntu-20-04-lts-end-of-life-standard-support-is-coming-to-an-end-heres-how-to-prepare
2025-06-07 14:45:33 +09:00
109c968baa Changed some INFO level log messages to DBG level 2025-06-07 00:16:44 +09:00
3d6975b369 Fixed a bug when changing fdentity to a temporary path 2025-05-22 03:04:03 +09:00
f2542f22fe Require C++14 (#2596)
This only has some minor additions of std::make_unique, digit
separators, std::string literals, and more flexible constexpr.
References #2469.
2025-05-18 12:34:53 +09:00
3421025074 Use region instead of endpoint for configuration (#2669)
This is more consistent with the AWS docs.  Generally endpoint refers
to an HTTP URL not just the region.  Fixes #2668.
2025-05-18 10:49:35 +09:00
43f49b15e8 Reduce use of awk in tests (#2662) 2025-04-20 10:03:07 +09:00
093d223799 Use Fedora 42 for other CI functions (#2661) 2025-04-20 09:59:52 +09:00
853404a3ce Upgrade Alpine, OpenSuSE, Rocky, and Ubuntu to Java 21 (#2660) 2025-04-19 09:24:14 +09:00
30f9378dec Upgrade CI to Ubuntu 25.04 (#2659) 2025-04-19 09:21:39 +09:00
e083825f55 Upgrade CI to Fedora 42 (#2658)
Simplify argument parsing with cut to work around lack of awk.
2025-04-18 23:45:07 +09:00
22ca6ba6ee Updated CI test result for macos about updating xattr 2025-04-03 23:48:48 +09:00
04a82583d1 A case of HEAD response for mp is different on compatible storage 2025-04-02 23:04:27 -07:00
ad4646f027 Ask for the provider on support requests (#2652) 2025-04-02 22:59:03 +09:00
bfd27460cc Update COMPILATION.md
fix typo
2025-03-18 22:00:26 -07:00
ee1ff8ba75 Update COMPILATION.md
add hint for windows compilation
2025-03-18 22:00:26 -07:00
885b1efac6 change the way to get existing fdentity to optimize concurrent IO performance (#2623)
* change the way to get existing fdentity

* fix compiling err for ver > 1.91

* use GetROPath instead of GetPath

* compare FdEntity::ro_path first in FdEntity::GetFdEntityHasLock()

---------

Co-authored-by: fangqianan.fqa <fangqianan.fqa@alibaba-inc.com>
2025-03-01 01:56:07 +09:00
e63fe7ec65 Added backup variable for fdcache entity path (#2637) 2025-02-15 12:19:58 +09:00
dc92b1b087 Remove unneeded uses of std::map::operator[] (#2642)
These unintentionally mutate the map.  Script suggested by @danmar.
2025-02-09 11:21:44 +09:00
edf4141ad6 Updated to the new issue template workflow 2025-01-26 11:51:10 -08:00
dd4f1395ca Run passing tests from pjdfstest (#1882)
This downloads a tarball by hash instead of using a submodule.
References #1589.
2025-01-19 10:26:40 +09:00
84dcf34e2c Fixed refactoring mistakes about loading IAM credentials 2025-01-19 03:24:30 +09:00
b679e1db98 Fixed memory leak found by valgrind 2025-01-19 03:22:11 +09:00
be183c0323 Pin AWS CLI to work around S3Proxy limitation (#2633)
Also specify architecture to allow future ARM64 CI to work.
2025-01-18 10:56:18 +09:00
3df1195ae5 Expand clang-tidy CI target to all static-checks (#2625)
Relocate cppcheck and Shellcheck into a single CI target instead of
running them as part of all distributions.  While this modestly
reduces run-time by about 20 seconds, more importantly it avoids
workarounds for older checker versions and simplifies the code.
2025-01-03 15:04:58 +09:00
cd41bddd1e Upgrade to S3Proxy 2.5.0 (#2627)
Release notes:

https://github.com/gaul/s3proxy/releases/tag/s3proxy-2.5.0
2025-01-03 15:01:15 +09:00
87874caf95 Remove double free in DestroyCurlShareHandle (#2626)
ShareHandles.erase implicitly calls this via the unique_ptr
destructor.
2025-01-03 14:59:06 +09:00
b83c2852b8 Upgrade CI to Alpine 3.21 (#2620) 2024-12-15 10:19:29 +09:00
5e39eff403 Remove explicit std::string constructors (#2619)
char * automatically converts via the implicit std::string constructor.
2024-12-15 10:18:44 +09:00
6c77cd8780 Fixed a bug in check_service_req_threadworker 2024-12-15 09:51:42 +09:00
669cba3240 Address some 32-bit warnings (#2615) 2024-12-01 10:32:28 +09:00
d4f3fb01fc Make some methods const (#2614)
Found via cppcheck --inconclusive.
2024-12-01 10:31:03 +09:00
65e4aef2a1 Prefer C++-style casts over C-style casts (#2599)
The former are easier to identify.  Found via clang-tidy.

Co-authored-by: Takeshi Nakatani <ggtakec@gmail.com>
2024-12-01 10:29:08 +09:00
d13396127c Delete Semaphore copy and move methods (#2613)
This matches the macOS implementation.
2024-12-01 10:28:50 +09:00
990d2e0074 Add top-level clang-tidy make target 2024-11-30 03:59:40 +09:00
61abf80197 Organized multi-threading related options 2024-11-29 05:13:06 +09:00
956e8c5750 Added new class for curl share handle
Added a new class for the curl share handle, and paired the curl handle
(S3fsCurl) with its worker thread.
Changed so that each thread has its own SSL session cache to prevent data
races.
The OpenSSL suppression for ThreadSanitizer is therefore no longer
necessary, so it was reverted.
2024-11-28 03:40:40 +09:00
bfc3ea767a Removed last use of S3fsMultiCurl and changed the related thread handling 2024-11-26 10:47:03 +09:00
499577c2a9 Refactored for standardizing content and copy handling for Multipart Upload 2024-11-25 05:48:38 +09:00
143284b2f3 Upgrade to S3Proxy 2.4.1 (#2433)
This transitions to the transient-nio2 storage backend which should
address a race condition with getBlob and be easier to work with in
the future.  Release notes:

https://github.com/gaul/s3proxy/releases/tag/s3proxy-2.4.1
https://github.com/gaul/s3proxy/releases/tag/s3proxy-2.4.0
https://github.com/gaul/s3proxy/releases/tag/s3proxy-2.3.0
https://github.com/gaul/s3proxy/releases/tag/s3proxy-2.2.0
https://github.com/gaul/s3proxy/releases/tag/s3proxy-2.1.0
2024-11-24 22:27:24 +09:00
7410b95db2 Refactored parallel get object request 2024-11-23 02:22:27 +09:00
86b5c9d88e Refactored multipart put head request 2024-11-16 09:35:23 +09:00
a1e47bc287 Changed multiple Head requests from S3fsMultiCurl to ThreadPoolMan 2024-11-14 07:41:03 +09:00
efc23316e9 Refactored single type requests to use through ThreadPoolMan 2024-11-12 09:08:14 +09:00
a680d3e138 Removed CurlHandlerPool and simplified it
Removed CurlHandlerPool and simplified the code.
To avoid a data race in OpenSSL, temporarily changed to not use the session cache.
2024-11-12 07:33:04 +09:00
cc29bea81b Call std::get_time instead of strptime (#2598)
This reduces the Windows-specific code.
2024-11-09 19:28:40 +09:00
af0d7ba45e Ensure fdent_data_lock is acquired after fdent_lock (#2595) 2024-11-09 19:28:13 +09:00
17d0970244 Changed the macOS Github Actions runner image to macos-13
Changed the macOS GitHub Actions runner image to macos-13, and
avoided an extended attribute error when copying with macos-fuse-t.
2024-11-07 16:51:53 +09:00
45b32046cd Consolidate lower and upper logic (#2594) 2024-11-06 00:07:12 +09:00
a101b88114 Compare case-insensitively instead of copying (#2593)
Also remove some code duplication.
2024-11-06 00:05:05 +09:00
d9ccdc4fce Look up header values directly (#2592)
stat_cache_entry::meta uses a case-insensitive comparator so there is
no need to loop over each entry to compare each key with lowercase.
2024-11-05 07:16:19 +09:00
7a989a58a0 Remove expensive log message during s3fs_getxattr (#2590) 2024-11-05 00:00:58 +09:00
82f694e473 Avoid unneeded std::string copies (#2589) 2024-11-04 23:59:54 +09:00
9a155c81a7 Enable clang-analyzer (#2588)
Also fix a few smaller issues.
2024-11-04 23:58:43 +09:00
9b888fa9b3 Fixed readdir bug with objects receiving EPERM on HEAD request 2024-11-04 15:25:12 +09:00
ef6c213471 Bypassed test_extended_attributes test on MacOS 2024-11-03 08:56:48 +09:00
90ea57b99b Add Fedora 41 and remove Fedora 39 from CI (#2580) 2024-11-02 23:55:43 +09:00
330cb39daf Remove CentOS 7 from CI (#2579)
CentOS 7 is EOL and thus unsupported.  This reverts commit
44d5b5e1c9.
2024-10-29 18:23:36 +09:00
b87a8400e3 Use pass-by-value for peeloff (#2578)
This avoids copies when used with std::move.
2024-10-29 18:21:07 +09:00
3b226ed672 Make psemaphore similar to C++20 std::counting_semaphore (#2569) 2024-10-29 08:23:05 +09:00
07881195f2 Add missing mutex header (#2576)
Found via clang-tidy.
2024-10-29 00:11:28 +09:00
08a5d35f34 Add Ubuntu 24.10 to CI (#2575) 2024-10-29 00:10:44 +09:00
561ce1e206 Update ChangeLog and configure.ac for 1.95
Fixes #2496.
2024-10-25 10:42:53 -07:00
7cb46db945 Add missing string header (#2574)
Found via clang-tidy.
2024-10-25 16:13:57 +09:00
fe82477a6b Add missing utility header for std::move (#2572)
Found via clang-tidy.
2024-10-25 14:55:01 +09:00
b8e56a40b2 Fixed a bug in clearing the queue accumulated during USR1 processing 2024-10-25 14:47:15 +09:00
3ff93d7342 Simplify bucket_block_count initialization (#2571) 2024-10-25 08:37:48 +09:00
e43de21e43 Separate clang-tidy into its own CI task (#2567)
clang-tidy takes 4 minutes on my laptop compared to ALL_TESTS=1 which
takes 8 minutes.  Using a separate task avoids duplicating clang-tidy
and unnecessarily slowing CI run-time.
2024-10-24 08:25:30 +09:00
31061416bc Separate serialization and deserialization code (#2566)
This is clearer than a bool parameter.
2024-10-24 08:22:35 +09:00
a8af6cb3b4 Run clang-tidy against test files (#2568) 2024-10-23 20:46:01 +09:00
fe0a62118d Remove some unused parameters (#2565) 2024-10-22 20:34:22 +09:00
cc5271ef2b Enable clang-tidy narrowing conversions (#2564) 2024-10-22 19:55:10 +09:00
d35b5a8905 Add OpenSSL suppression for ThreadSanitizer (#2559) 2024-10-22 19:52:27 +09:00
64c96e89c5 Expand use of auto (#2563)
Found via clang-tidy.
2024-10-22 19:43:12 +09:00
9c4fcbd050 Use std::max instead of conditional (#2562)
Found via clang-tidy.
2024-10-22 19:22:39 +09:00
b34e2711a7 Fixed warnings on integer comparisons (#2558)
Follows on to 2d1409a672.
2024-10-22 18:53:37 +09:00
14f07626e0 Wrap EVP_MD_CTX in a std::unique_ptr (#2557) 2024-10-20 16:07:53 +09:00
8c5ac5c2d9 Remove more raw pointers (#2556)
Make destructor public so std::unique_ptr can call it.  Also restrict
singleton creation to satisfy cppcheck.
2024-10-20 16:06:05 +09:00
4b6e53223b Use std::shared_ptr to refer to FdEntity (#2541)
FdEntity may have multiple references due to ChangeEntityToTempPath.
This relies on the std::enable_shared_from_this helper to create a
std::shared_ptr from this.  Fixes #2532.
2024-10-20 14:56:29 +09:00
a505cebf9b Expand use of std::unique_ptr for FILE* (#2555) 2024-10-18 22:06:47 +09:00
141d74f187 Use auto for iterator variable types (#2554)
This touches a few other long type names.  Applied via clang-tidy
-fix.
2024-10-18 21:57:52 +09:00
4c5b7595b4 Add missing GUARDED_BY to fdcache_entity (#2549)
This requires a fake GetMutex for the lock checker to understand the
control flow.  Also remove unneeded locking comments that the annotations
supersede.  Follows on to #2491.
2024-10-18 00:39:45 +09:00
e613ae55bb Replace curl_warnings_lock with std::atomic (#2550)
This is simpler and lighter-weight.
2024-10-18 00:36:36 +09:00
5594106351 Add miscellaneous locking annotations (#2551) 2024-10-18 00:34:57 +09:00
c5031a5a97 Simplify some method parameters (#2553)
Add const where possible and avoid unnecessary reference parameters.
2024-10-18 00:05:42 +09:00
473f9df65a FreeBSD compilation fixes
Closes #2517.
2024-10-16 13:46:12 -07:00
0c26014812 Fixed exclusive control of upload_id in PseudoFdInfo class 2024-10-16 02:46:48 +09:00
06a3822965 [Improvement #2490] Add GUARDED_BY to FdEntity and fix locking 2024-10-15 02:04:46 +09:00
2d1409a672 Fixed warnings on integer comparisons in openssl_auth.cpp 2024-10-15 02:00:20 +09:00
1d3ab76cc4 Ensure that test checks data length (#2546)
wc has an optimization that can use metadata when stdin is set to a
file.  Also fix up logging.
2024-10-14 18:31:37 +09:00
c00b5fd4bb Propagate error state from insertV4Headers (#2547)
s3fs_sha256_hex_fd returns an empty string when calculating the hash
fails.
2024-10-14 10:28:19 +09:00
000273a8de Configure target for clang thread safety checking (#2493) 2024-10-14 10:19:03 +09:00
3ba8c2a139 Replace non-standard VLAs with std::array (#2544) 2024-10-13 12:03:56 +09:00
c0219b38d1 Return correct success value from NSS s3fs_md5 (#2543) 2024-10-13 11:58:54 +09:00
40f95272be Fix locking annotations and add one missing lock (#2542)
This allows clang's thread safety checks to pass.
2024-10-13 11:53:56 +09:00
15e2eae69a Address clang-tidy 19 warnings (#2540) 2024-10-13 10:09:47 +09:00
743c86e506 Fix issues discovered by Coverity (#2535) 2024-10-08 08:07:13 +09:00
4605cc2035 Fixed fake_diskfree option 2024-10-07 01:24:35 +09:00
a259981f16 Enable cppcoreguidelines-pro-type-const-cast (#2537)
This fixes dangerous uses of const_cast.
2024-10-06 18:40:48 +09:00
bbbb2d0908 Specify deleter function for regex_t unique_ptr (#2536) 2024-10-06 18:30:22 +09:00
e80de15cc6 Make FILE ownership clearer via unique_ptr (#2534) 2024-10-06 18:03:02 +09:00
b283ab291a Modified and bypassed test_multipart_mix on MacOS with nocopyapi 2024-09-29 13:58:46 +09:00
c24015ae17 Modified and bypassed some MacOS tests 2024-09-29 09:17:11 +09:00
df5364d758 Enable readability-implicit-bool-conversion (#2530)
This fixes one real error, one misreported EPERM, and some false
positives.  References #2529.
2024-09-28 15:28:50 +09:00
52c10cd45d Call Rename outside AutoFdEntity scope (#2528)
This avoids a use-after-free in the destructor.
2024-09-28 15:25:02 +09:00
e8f1e3473c Fixed incorrect return code in S3fsCurl::RequestPerform 2024-09-28 13:25:37 +09:00
37cf324c52 Return non-zero exit code on Valgrind errors (#2527)
Previously this ignored use-after-frees and other errors.
2024-09-28 11:37:25 +09:00
5691071ac6 Log entire line when curldbg lacks a newline (#2526)
Previously -o curldbg=body would read uninitialized memory.
2024-09-28 11:35:49 +09:00
6faaff10ee Fixed hardlink test for macos 2024-09-27 00:52:07 +09:00
4796e982ab Fixed opensuse/leap:15 test 2024-09-27 00:51:51 +09:00
22869d99a5 Set errno to zero before calling sysconf (#2515)
FreeBSD returns -1 since it has no limits for _SC_GETPW_R_SIZE_MAX and
_SC_GETGR_R_SIZE_MAX but is not required to reset errno.  Also fix up
logging.  Fixes #2514.
2024-08-24 09:48:54 +09:00
7d2d4e8866 s3 signv4 support uri endpoints (#2510) 2024-08-17 13:46:13 +09:00
4fe2652c6c Fix Windows compilation (#2506)
* docs: Fix Windows compile instructions

Signed-off-by: Naoki Ikeguchi <me@s6n.jp>

* fix: Use fallocate stub on MSYS2

Signed-off-by: Naoki Ikeguchi <me@s6n.jp>

---------

Signed-off-by: Naoki Ikeguchi <me@s6n.jp>
2024-07-27 09:54:38 +09:00
fcb5aa77fb Fix minor issues when compiling with MSYS (#2505)
References #2503.
2024-07-27 09:43:16 +09:00
411e42384e Acquire lock before logging (#2502)
This is not safe -- another caller could modify a std::string field
while logging which could read an invalid pointer.  References #2490.
2024-07-21 16:14:22 +09:00
1c2f61e2a5 Remove unneeded lock utility functions (#2500)
std::mutex RAII removes the need for these.

Co-authored-by: Takeshi Nakatani <ggtakec@gmail.com>
2024-07-15 15:32:39 +09:00
23efccbe39 Disable thread safety analysis on conditional locks (#2498)
Clang does not support this:

https://clang.llvm.org/docs/ThreadSafetyAnalysis.html#conditional-locks

Co-authored-by: Takeshi Nakatani <ggtakec@gmail.com>
2024-07-15 15:17:47 +09:00
77ffe7d634 Wrap CURL* in a std::unique_ptr (#2495)
This is safer and clarifies the ownership of pointers.

Co-authored-by: Takeshi Nakatani <ggtakec@gmail.com>
2024-07-15 15:15:03 +09:00
60b871e0ae Simplify has_mp_stat (#2499)
This is unnecessary since we moved from pthread_mutex_t to
std::atomic.
2024-07-15 15:04:28 +09:00
44d5b5e1c9 Continued GitHub Actions (CI) execution after CentOS 7 EOL 2024-07-13 10:30:27 +05:30
03651a30ea Add infrastructure for clang static lock checking (#2492)
Co-authored-by: Takeshi Nakatani <ggtakec@gmail.com>
2024-07-13 09:48:43 +09:00
db80fa2eb0 Upgrade CI to Alpine 3.20 2024-07-11 23:14:07 +05:30
6f90c6918f Fix incorrect locking annotations (#2494)
Co-authored-by: Takeshi Nakatani <ggtakec@gmail.com>
2024-07-11 23:13:44 +09:00
437bf7ec95 Convert pthread_t to C++11 std::thread (#2481)
This has better cross-platform support and stronger type-safety.
2024-07-11 22:37:25 +09:00
50d5a73f84 Simplify curl progress tracking (#2486)
Use a struct with named fields instead of a pair for clarity and use a
single map to store the structs for efficiency.
2024-07-06 16:35:25 +09:00
ec183d0d9a Dropped GitHub Actions (CI) for CentOS 7 and Debian 10 (Buster) 2024-07-01 23:46:18 +05:30
ae28a110ab Remove unused function (#2484)
Also clean up some function prototypes.

Co-authored-by: Takeshi Nakatani <ggtakec@gmail.com>
2024-07-01 23:30:32 +09:00
a6637b29e6 Opt in to all clang-tidy checks by default (#2477)
Opt out of the noisy checks.  Disable clang-tidy on Debian bullseye
and buster and Ubuntu 20.04 due to segfaults.
2024-07-01 22:29:34 +09:00
1a50b9a04a Fixed a deadlock in the FdManager::ChangeEntityToTempPath method call 2024-06-30 20:29:01 +05:30
585f137cf0 Remove unused headers found by clang-tidy (#2480)
Found via misc-include-cleaner but this requires more work.
2024-06-25 23:32:46 +09:00
1449905fe5 Make deleted constructors and operators public (#2479)
Deleting them better conveys the intent.
2024-06-24 08:26:24 +09:00
622dc0a815 Convert pthread_mutex to std::mutex (#2476)
This simplifies resource management and improves Windows compatibility.
2024-06-24 00:48:01 +09:00
86b353511a Replace memset with C++11 value initialization (#2471)
This generates the same code but is safer due to using an implicit
length and allowing member initialization.
2024-06-24 00:24:49 +09:00
a3a0ae523f Changed the display format of the Git commit hash in the version display 2024-06-23 16:01:33 +05:30
fa807a56fb Fix typos (#2473) 2024-06-23 15:33:46 +09:00
254d717a4a Address clang-tidy 19 warnings (#2474) 2024-06-23 12:21:51 +09:00
86e6bdaf4d Apply clang-tidy to headers (#2470) 2024-06-23 11:49:59 +09:00
2841601ad5 Remove uses of AutoLock::ALREADY_LOCKED (#2466)
Instead annotate the methods with REQUIRES so that the caller knows if
they should lock.  For public interfaces, introduce HasLock wrappers.
This simplifies control flow, allows migration to std::mutex, and
eventually will enable use of static lock checking.
2024-06-23 11:24:51 +09:00
39c2d8b2a7 Improve CI workflow 2024-06-14 16:22:12 +09:00
683452a9be Remove unnecessary copy constructors and operator= (#2468) 2024-06-10 23:37:53 +09:00
ebae5a302f Prefer std::string::clear where possible (#2467)
This is somewhat more clear and is declared noexcept.
2024-06-10 23:36:49 +09:00
ba7b2ef9f0 Return boolean as bool not int (#2465) 2024-06-04 22:38:30 +09:00
f3946a2310 Keep cache path parameters as std::string (#2464)
While some of these originate as char *, eventually they convert to
std::string in the std::map lookup.
2024-06-04 22:37:48 +09:00
a4f694c345 Pass const std::string by reference (#2461)
This is more idiomatic than by pointer.
2024-05-28 09:17:45 +09:00
2c532e8b79 Fixed error reports of failure in cppcheck 2.14.0 2024-05-28 07:29:03 +09:00
5c1932f702 Upgrade CI to Ubuntu 24.04 LTS (#2456) 2024-05-12 11:12:25 +09:00
ccdcccd44c Fix DeadLock in FdManager::ChangeEntityToTempPath (#2455)
Commit e3b50ad introduced a smart pointer to manage FdEntity.

But in ChangeEntityToTempPath, we should not destroy the entity.

We should move the entry to the temp key.

Signed-off-by: liubingrun <liubr1@chinatelecom.cn>
2024-05-11 11:29:40 +09:00
3864f58c22 Upgrade CI to Fedora 40 (#2451) 2024-05-11 09:25:05 +09:00
c36827d1de Fixed README.md for Github Action Badge URL (#2449) 2024-04-28 20:10:53 +09:00
e2cc36a37f Updated COMPILATION.md about compilation on linux (#2445) 2024-04-28 14:31:01 +09:00
cf6102f91b Changed due to s3fs-fuse logo change (#2448) 2024-04-28 14:28:24 +09:00
dd6815b90f retry request on HTTP 429 error 2024-04-14 12:09:26 +09:00
95026804e9 Support SSL client cert and added ssl_client_cert option 2024-04-14 10:21:48 +09:00
9ab5a2ea73 Fixed configure error for GHA:sanitize_thread 2024-03-19 21:37:19 +09:00
a5cdd05c25 Added ipresolve option 2024-03-13 22:29:17 +09:00
31676f6201 Convert thpoolman_param to value (#2430)
This simplifies memory management.
2024-03-13 21:27:12 +09:00
c97f7a2a13 Address clang-tidy 18 warnings (#2428) 2024-03-07 01:04:22 +09:00
be54c34ecb Remove unneeded XML macros (#2427) 2024-03-07 00:45:34 +09:00
79597c7960 Upgrade CI to Alpine 3.19 (#2429) 2024-03-07 00:23:00 +09:00
103 changed files with 10616 additions and 7321 deletions


@@ -1,7 +1,10 @@
WarningsAsErrors: '*'
Checks: '
-*,
bugprone-*,
*,
-abseil-*,
-altera-*,
-android-*,
-boost-*,
-bugprone-assignment-in-if-condition,
-bugprone-branch-clone,
-bugprone-easily-swappable-parameters,
@@ -9,68 +12,72 @@ Checks: '
-bugprone-macro-parentheses,
-bugprone-narrowing-conversions,
-bugprone-unhandled-self-assignment,
cert-*,
-cert-dcl50-cpp,
-cert-env33-c,
-cert-err33-c,
-cert-err58-cpp,
cppcoreguidelines-*,
-clang-analyzer-security.insecureAPI.strcpy,
-clang-analyzer-unix.BlockInCriticalSection,
-concurrency-mt-unsafe,
-cppcoreguidelines-avoid-c-arrays,
-cppcoreguidelines-avoid-do-while,
-cppcoreguidelines-avoid-magic-numbers,
-cppcoreguidelines-avoid-non-const-global-variables,
-cppcoreguidelines-init-variables,
-cppcoreguidelines-macro-usage,
-cppcoreguidelines-narrowing-conversions,
-cppcoreguidelines-no-malloc,
-cppcoreguidelines-owning-memory,
-cppcoreguidelines-pro-bounds-array-to-pointer-decay,
-cppcoreguidelines-pro-bounds-constant-array-index,
-cppcoreguidelines-pro-bounds-pointer-arithmetic,
-cppcoreguidelines-pro-type-const-cast,
-cppcoreguidelines-pro-type-member-init,
-cppcoreguidelines-pro-type-reinterpret-cast,
-cppcoreguidelines-pro-type-union-access,
-cppcoreguidelines-pro-type-vararg,
google-*,
-cppcoreguidelines-special-member-functions,
-fuchsia-*,
-google-build-using-namespace,
-google-readability-casting,
-google-readability-function-size,
-google-readability-todo,
-google-runtime-int,
-google-runtime-references,
misc-*,
-hicpp-*,
-llvm-*,
-llvmlibc-*,
-misc-const-correctness,
-misc-include-cleaner,
-misc-no-recursion,
-misc-non-private-member-variables-in-classes,
-misc-redundant-expression,
-misc-unused-parameters,
-misc-use-anonymous-namespace,
modernize-*,
-misc-use-internal-linkage,
-modernize-avoid-c-arrays,
-modernize-loop-convert,
-modernize-make-unique,
-modernize-use-nodiscard,
-modernize-raw-string-literal,
-modernize-return-braced-init-list,
-modernize-use-auto,
-modernize-use-default-member-init,
-modernize-use-trailing-return-type,
-modernize-use-using,
performance-*,
-performance-avoid-endl,
-performance-no-int-to-ptr,
portability-*,
readability-*,
-readability-avoid-nested-conditional-operator,
-readability-braces-around-statements,
-readability-else-after-return,
-readability-function-cognitive-complexity,
-readability-function-size,
-readability-identifier-length,
-readability-implicit-bool-conversion,
-readability-inconsistent-declaration-parameter-name,
-readability-isolate-declaration,
-readability-magic-numbers,
-readability-math-missing-parentheses,
-readability-named-parameter,
-readability-redundant-access-specifiers,
-readability-redundant-declaration,
-readability-simplify-boolean-expr,
-readability-suspicious-call-argument'
CheckOptions:
cppcoreguidelines-narrowing-conversions.WarnOnEquivalentBitWidth: 'false'
readability-implicit-bool-conversion.AllowIntegerConditions: 'true'
readability-implicit-bool-conversion.AllowPointerConditions: 'true'
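
The check list above is what the repository's clang-tidy make targets consume. As a minimal sketch of running it locally, assuming clang-tidy is installed and the tree has already been configured (the CI jobs below run `./autogen.sh` and `./configure` first):

```sh
# Run clang-tidy using the project's .clang-tidy configuration;
# the top-level target covers both src/ and test/.
make clang-tidy
```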


@@ -0,0 +1,16 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: 'feature request'
assignees: ''
---
### Feature request
<!-- -------------------------------------------------------------
Please let us know your ideas, such as features you want to improve,
features to add, etc.
And list any related Issue or Pull Request numbers.
-------------------------------------------------------------- -->


@@ -1,10 +1,15 @@
---
name: Support request (Including bug reports)
about: Request support for usage, bugs, etc.
title: ''
labels: ''
assignees: ''
---
<!-- --------------------------------------------------------------------------
The following information is very important in order to help us to help you.
Omission of the following details may delay your support request or receive no
attention at all.
Keep in mind that the commands we provide to retrieve information are oriented
to GNU/Linux Distributions, so you could need to use others if you use s3fs on
macOS or BSD.
--------------------------------------------------------------------------- -->
### Additional Information
@@ -15,6 +20,9 @@
#### Version of fuse being used (`pkg-config --modversion fuse`, `rpm -qi fuse` or `dpkg -s fuse`)
<!-- example: 2.9.2 -->
#### Provider (`AWS`, `OVH`, `Hetzner`, `iDrive E2`, ...)
<!-- example: AWS -->
#### Kernel information (`uname -r`)
<!-- example: 5.10.96-90.460.amzn2.x86_64 -->
@@ -38,3 +46,4 @@
### Details about issue
<!-- Please describe the content of the issue in detail. -->


@@ -24,7 +24,7 @@ on:
push:
pull_request:
#
# CRON event is fire on every sunday(UTC).
# CRON event is fired on every sunday (UTC).
#
schedule:
- cron: '0 0 * * 0'
@@ -35,7 +35,6 @@ on:
jobs:
Linux:
runs-on: ubuntu-latest
#
# build matrix for containers
#
@@ -50,23 +49,23 @@ jobs:
#
matrix:
container:
- ubuntu:23.10
- ubuntu:25.10
- ubuntu:24.04
- ubuntu:22.04
- ubuntu:20.04
- debian:trixie
- debian:bookworm
- debian:bullseye
- debian:buster
- rockylinux:9
- rockylinux:8
- centos:centos7
- fedora:39
- fedora:38
- rockylinux/rockylinux:10
- rockylinux/rockylinux:9
- rockylinux/rockylinux:8
- fedora:43
- fedora:42
- opensuse/leap:15
- alpine:3.18
- opensuse/leap:16.0
- alpine:3.22
container:
image: ${{ matrix.container }}
options: "--privileged --cap-add SYS_ADMIN --device /dev/fuse"
env:
@@ -79,23 +78,17 @@ jobs:
# [NOTE]
# On openSUSE, tar and gzip must be installed before action/checkout.
#
- name: Install packages before checkout
run: |
if [ "${{ matrix.container }}" = "opensuse/leap:15" ]; then zypper install -y tar gzip; fi
- name: Install openSUSE packages before checkout
if: matrix.container == 'opensuse/leap:15' || matrix.container == 'opensuse/leap:16.0'
run: zypper install -y tar gzip
# [NOTE]
# actions/checkout@v3 uses nodejs v16 and will be deprecated.
# However, @v4 does not work on centos7 depending on the glibc version,
# so we will continue to use @v3.
#
- name: Checkout source code(other than centos7)
if: matrix.container != 'centos:centos7'
- name: Install Alpine packages before checkout
if: matrix.container == 'alpine:3.22'
run: apk add --no-progress --no-cache bash
- name: Checkout source code
uses: actions/checkout@v4
- name: Checkout source code(only centos7)
if: matrix.container == 'centos:centos7'
uses: actions/checkout@v3
# [NOTE]
# Matters that depend on OS:VERSION are determined and executed in the following script.
# Please note that the option to configure (CONFIGURE_OPTIONS) is set in the environment variable.
@@ -110,27 +103,6 @@ jobs:
/bin/sh -c "./configure ${CONFIGURE_OPTIONS}"
make --jobs=$(nproc)
- name: clang-tidy
run: |
# skip if clang-tidy does not exist, e.g., CentOS 7
if command -v clang-tidy; then
make -C src/ clang-tidy
make -C test/ clang-tidy
fi
- name: Cppcheck
run: |
# specify the version range to run cppcheck (cppcheck version number is x.y or x.y.z)
if cppcheck --version | sed -e 's/\./ /g' | awk '{if (($2 * 1000 + $3) <= 2004) { exit(1) } }'; then
make cppcheck
fi
- name: Shellcheck
run: |
if shellcheck --version | awk -F '[ .]' '/version:/ && ($2 * 1000 + $3 <= 7) { exit(1) }'; then
make shellcheck
fi
- name: Test suite
run: |
make check -C src
@@ -141,11 +113,18 @@ jobs:
# This product(package) is a workaround for osxfuse which required an OS reboot(macos 11 and later).
# see. https://github.com/macos-fuse-t/fuse-t
# About osxfuse
# This job doesn't work with Github Actions using macOS 11+ because "load_osxfuse" returns
# This job doesn't work with GitHub Actions using macOS 11+ because "load_osxfuse" returns
# "exit code = 1".(requires OS reboot)
#
macos12:
runs-on: macos-12
macos-14:
runs-on: macos-14
# [NOTE]
# In macos-14 (and maybe later), the location of the CA certificate is different and you need to specify it.
# We give the CA path as an environment variable.
#
env:
CURL_CA_BUNDLE: "/opt/homebrew/etc/ca-certificates/cert.pem"
steps:
- name: Checkout source code
@@ -160,37 +139,27 @@ jobs:
- name: Install fuse-t
run: |
if [ ! -d /usr/local/include ]; then sudo mkdir -p /usr/local/include; echo "Created /usr/local/include directory"; fi
HOMEBREW_NO_AUTO_UPDATE=1 brew install fuse-t
- name: Install brew other packages
run: |
S3FS_BREW_PACKAGES='automake cppcheck python3 coreutils gnu-sed shellcheck jq';
for s3fs_brew_pkg in ${S3FS_BREW_PACKAGES}; do if brew list | grep -q ${s3fs_brew_pkg}; then if brew outdated | grep -q ${s3fs_brew_pkg}; then HOMEBREW_NO_AUTO_UPDATE=1 brew upgrade ${s3fs_brew_pkg}; fi; else HOMEBREW_NO_AUTO_UPDATE=1 brew install ${s3fs_brew_pkg}; fi; done;
- name: Install awscli2
run: |
cd /tmp
curl "https://awscli.amazonaws.com/AWSCLIV2.pkg" -o "AWSCLIV2.pkg"
sudo installer -pkg AWSCLIV2.pkg -target /
for s3fs_brew_pkg in ${S3FS_BREW_PACKAGES}; do
if brew list | grep -q ${s3fs_brew_pkg}; then if brew outdated | grep -q ${s3fs_brew_pkg}; then HOMEBREW_NO_AUTO_UPDATE=1 brew upgrade ${s3fs_brew_pkg}; fi; else HOMEBREW_NO_AUTO_UPDATE=1 brew install ${s3fs_brew_pkg}; fi
done
- name: Build
run: |
./autogen.sh
PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig ./configure CXXFLAGS='-std=c++11 -DS3FS_PTHREAD_ERRORCHECK=1'
PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig ./configure
make --jobs=$(sysctl -n hw.ncpu)
- name: Cppcheck
run: |
# specify the version range to run cppcheck (cppcheck version number is x.y or x.y.z)
if cppcheck --version | sed -e 's/\./ /g' | awk '{if (($2 * 1000 + $3) <= 2004) { exit(1) } }'; then
make cppcheck
fi
run: make cppcheck
- name: Shellcheck
run: |
if shellcheck --version | awk -F '[ .]' '/version:/ && ($2 * 1000 + $3 <= 7) { exit(1) }'; then
make shellcheck
fi
run: make shellcheck
- name: Test suite
run: |
@@ -222,10 +191,11 @@ jobs:
- sanitize_address
- sanitize_others
- sanitize_thread
- thread_safety
- valgrind
container:
image: fedora:39
image: fedora:43
options: "--privileged --cap-add SYS_ADMIN --device /dev/fuse"
@@ -235,54 +205,107 @@ jobs:
- name: Install packages
run: |
.github/workflows/linux-ci-helper.sh fedora:39
.github/workflows/linux-ci-helper.sh fedora:43
- name: Install clang
run: |
dnf install -y clang
if [ "${{ matrix.checktype }}" = "valgrind" ]; then
dnf install -y valgrind
fi
dnf install -y \
clang \
libcxx \
libcxx-devel
- name: Install Valgrind
if: matrix.checktype == 'valgrind'
run: dnf install -y valgrind
#
# Set CXX/CXXFLAGS and Variables for test
#
- name: Set variables
run: |
COMMON_CXXFLAGS='-g -Wno-cpp -DS3FS_PTHREAD_ERRORCHECK=1'
if [ "${{ matrix.checktype }}" = "glibc_debug" ]; then
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -D_GLIBCXX_DEBUG" >> $GITHUB_ENV
elif [ "${{ matrix.checktype }}" = "sanitize_address" ]; then
echo 'CXX=clang++' >> $GITHUB_ENV
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -fsanitize=address -fsanitize-address-use-after-scope" >> $GITHUB_ENV
echo 'ASAN_OPTIONS=detect_leaks=1,detect_stack_use_after_return=1' >> $GITHUB_ENV
elif [ "${{ matrix.checktype }}" = "sanitize_memory" ]; then
echo 'CXX=clang++' >> $GITHUB_ENV
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -fsanitize=memory" >> $GITHUB_ENV
elif [ "${{ matrix.checktype }}" = "sanitize_thread" ]; then
echo 'CXX=clang++' >> $GITHUB_ENV
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -fsanitize=thread" >> $GITHUB_ENV
echo 'TSAN_OPTIONS=halt_on_error=1' >> $GITHUB_ENV
elif [ "${{ matrix.checktype }}" = "sanitize_others" ]; then
echo 'CXX=clang++' >> $GITHUB_ENV
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O1 -fsanitize=undefined,implicit-conversion,local-bounds,unsigned-integer-overflow" >> $GITHUB_ENV
elif [ "${{ matrix.checktype }}" = "valgrind" ]; then
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O1" >> $GITHUB_ENV
echo 'VALGRIND=--leak-check=full' >> $GITHUB_ENV
echo 'RETRIES=100' >> $GITHUB_ENV
echo 'S3_URL=http://127.0.0.1:8081' >> $GITHUB_ENV
fi
COMMON_CXXFLAGS='-g -Wno-cpp'
{
if [ "${{ matrix.checktype }}" = "glibc_debug" ]; then
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -D_GLIBCXX_DEBUG"
elif [ "${{ matrix.checktype }}" = "sanitize_address" ]; then
echo 'CXX=clang++'
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -fsanitize=address -fsanitize-address-use-after-scope"
echo 'ASAN_OPTIONS=detect_leaks=1,detect_stack_use_after_return=1'
elif [ "${{ matrix.checktype }}" = "sanitize_memory" ]; then
echo 'CXX=clang++'
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -fsanitize=memory"
elif [ "${{ matrix.checktype }}" = "sanitize_thread" ]; then
echo 'CXX=clang++'
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -fsanitize=thread"
echo 'TSAN_OPTIONS=halt_on_error=1'
# [NOTE]
# Set this to avoid following error when running configure.
# "FATAL: ThreadSanitizer: unexpected memory mapping"
sysctl vm.mmap_rnd_bits=28
elif [ "${{ matrix.checktype }}" = "sanitize_others" ]; then
echo 'CXX=clang++'
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O1 -fsanitize=undefined,implicit-conversion,local-bounds,unsigned-integer-overflow"
elif [ "${{ matrix.checktype }}" = "thread_safety" ]; then
echo 'CXX=clang++'
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O1 -Wthread-safety -Wthread-safety-beta -stdlib=libc++ -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS -Werror"
echo 'LDFLAGS=-DCLANG_DEFAULT_LINKER=lld'
elif [ "${{ matrix.checktype }}" = "valgrind" ]; then
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O1"
echo 'VALGRIND="--leak-check=full --error-exitcode=1"'
echo 'RETRIES=100'
echo 'S3_URL=http://127.0.0.1:8081'
fi
} >> "$GITHUB_ENV"
- name: Build
run: |
./autogen.sh
/bin/sh -c "CXX=${CXX} CXXFLAGS=\"${CXXFLAGS}\" ./configure --prefix=/usr --with-openssl"
make
/bin/sh -c "CXX=${CXX} CXXFLAGS=\"${CXXFLAGS}\" LDFLAGS=\"${LDFLAGS}\" ./configure --prefix=/usr --with-openssl"
make --jobs=$(nproc)
- name: Test suite
run: |
/bin/sh -c "ALL_TESTS=1 ASAN_OPTIONS=${ASAN_OPTIONS} TSAN_OPTIONS=${TSAN_OPTIONS} VALGRIND=${VALGRIND} RETRIES=${RETRIES} make check -C test || (test/filter-suite-log.sh test/test-suite.log; exit 1)"
static-checks:
runs-on: ubuntu-latest
container:
image: fedora:42
steps:
- name: Checkout source code
uses: actions/checkout@v4
- name: Install packages
run: |
.github/workflows/linux-ci-helper.sh fedora:42
- name: Install extra packages
run: |
dnf install -y \
clang-tools-extra \
cppcheck \
python3 \
ShellCheck
- name: Build
run: |
./autogen.sh
/bin/sh -c "./configure ${CONFIGURE_OPTIONS}"
make --jobs=$(nproc)
- name: clang-tidy
run: |
make clang-tidy
- name: Cppcheck
run: |
make cppcheck
- name: Shellcheck
run: |
make shellcheck
#
# Local variables:
# tab-width: 4


@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash
#
# s3fs - FUSE-based file system backed by Amazon S3
#
@@ -53,20 +53,21 @@ fi
#-----------------------------------------------------------
CONTAINER_FULLNAME=$1
# shellcheck disable=SC2034
CONTAINER_OSNAME=$(echo "${CONTAINER_FULLNAME}" | sed 's/:/ /g' | awk '{print $1}')
CONTAINER_OSNAME=$(echo "${CONTAINER_FULLNAME}" | cut -d: -f1)
# shellcheck disable=SC2034
CONTAINER_OSVERSION=$(echo "${CONTAINER_FULLNAME}" | sed 's/:/ /g' | awk '{print $2}')
CONTAINER_OSVERSION=$(echo "${CONTAINER_FULLNAME}" | cut -d: -f2)
#-----------------------------------------------------------
# Common variables for awscli2
#-----------------------------------------------------------
AWSCLI_URI="https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip"
AWSCLI_ZIP_FILE="awscliv2.zip"
CURL_DIRECT_VERSION="v8.11.0"
CURL_DIRECT_URL="https://github.com/moparisthebest/static-curl/releases/download/${CURL_DIRECT_VERSION}/curl-$(uname -m | sed -e s/x86_64/amd64/)"
CURL_HASH_X86_64="d18aa1f4e03b50b649491ca2c401cd8c5e89e72be91ff758952ad2ab5a83135d"
CURL_HASH_AARCH64="1b050abd1669f9a2ac29b34eb022cdeafb271dce5a4fb57d8ef8fadff6d7be1f"
#-----------------------------------------------------------
# Parameters for configure(set environments)
#-----------------------------------------------------------
CXXFLAGS="-O -DS3FS_PTHREAD_ERRORCHECK=1"
CXX="g++"
CXXFLAGS="-O"
LDFLAGS=""
CONFIGURE_OPTIONS="--prefix=/usr --with-openssl"
#-----------------------------------------------------------
@@ -77,149 +78,272 @@ CONFIGURE_OPTIONS="--prefix=/usr --with-openssl"
#
PACKAGE_ENABLE_REPO_OPTIONS=""
PACKAGE_INSTALL_ADDITIONAL_OPTIONS=""
SHELLCHECK_DIRECT_INSTALL=0
AWSCLI_DIRECT_INSTALL=1
CURL_DIRECT_INSTALL=0
if [ "${CONTAINER_FULLNAME}" = "ubuntu:23.10" ]; then
if [ "${CONTAINER_FULLNAME}" = "ubuntu:25.10" ] ||
[ "${CONTAINER_FULLNAME}" = "ubuntu:24.04" ]; then
PACKAGE_MANAGER_BIN="apt-get"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
INSTALL_PACKAGES="autoconf autotools-dev clang-tidy openjdk-21-jre-headless fuse jq libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""
INSTALL_PACKAGES=(
attr
autoconf
autotools-dev
build-essential
curl
fuse
g++
git
jq
libcurl4-openssl-dev
libfuse-dev
libssl-dev
libtool
libxml2-dev
locales-all
mailcap
openjdk-21-jre-headless
pkg-config
)
elif [ "${CONTAINER_FULLNAME}" = "ubuntu:22.04" ]; then
PACKAGE_MANAGER_BIN="apt-get"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
INSTALL_PACKAGES="autoconf autotools-dev clang-tidy openjdk-17-jre-headless fuse jq libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""
INSTALL_PACKAGES=(
attr
autoconf
autotools-dev
build-essential
curl
fuse
g++
git
jq
libcurl4-openssl-dev
libfuse-dev
libssl-dev
libtool
libxml2-dev
locales-all
mime-support
openjdk-21-jre-headless
pkg-config
)
elif [ "${CONTAINER_FULLNAME}" = "ubuntu:20.04" ]; then
CURL_DIRECT_INSTALL=1
elif [ "${CONTAINER_FULLNAME}" = "debian:trixie" ]; then
PACKAGE_MANAGER_BIN="apt-get"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
INSTALL_PACKAGES="autoconf autotools-dev clang-tidy openjdk-17-jre-headless fuse jq libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""
INSTALL_PACKAGES=(
attr
autoconf
autotools-dev
build-essential
curl
fuse
g++
git
jq
libcurl4-openssl-dev
libfuse-dev
libssl-dev
libtool
libxml2-dev
locales-all
mailcap
openjdk-21-jre-headless
pkg-config
procps
)
elif [ "${CONTAINER_FULLNAME}" = "debian:bookworm" ]; then
elif [ "${CONTAINER_FULLNAME}" = "debian:bookworm" ] ||
[ "${CONTAINER_FULLNAME}" = "debian:bullseye" ]; then
PACKAGE_MANAGER_BIN="apt-get"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
INSTALL_PACKAGES="autoconf autotools-dev clang-tidy openjdk-17-jre-headless fuse jq libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl procps python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""
INSTALL_PACKAGES=(
attr
autoconf
autotools-dev
build-essential
curl
fuse
g++
git
jq
libcurl4-openssl-dev
libfuse-dev
libssl-dev
libtool
libxml2-dev
locales-all
mime-support
openjdk-17-jre-headless
pkg-config
procps
)
elif [ "${CONTAINER_FULLNAME}" = "debian:bullseye" ]; then
PACKAGE_MANAGER_BIN="apt-get"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
CURL_DIRECT_INSTALL=1
INSTALL_PACKAGES="autoconf autotools-dev clang-tidy openjdk-17-jre-headless fuse jq libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl procps python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""
elif [ "${CONTAINER_FULLNAME}" = "debian:buster" ]; then
PACKAGE_MANAGER_BIN="apt-get"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
INSTALL_PACKAGES="autoconf autotools-dev clang-tidy default-jre-headless fuse jq libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl procps python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""
elif [ "${CONTAINER_FULLNAME}" = "rockylinux:9" ]; then
elif [ "${CONTAINER_FULLNAME}" = "rockylinux/rockylinux:10" ] ||
[ "${CONTAINER_FULLNAME}" = "rockylinux/rockylinux:9" ]; then
PACKAGE_MANAGER_BIN="dnf"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
PACKAGE_ENABLE_REPO_OPTIONS="--enablerepo=crb"
# [NOTE]
# Rocky Linux 9 (or CentOS Stream 9) images may have curl installation issues that
# Rocky Linux 9/10 (or CentOS Stream 9/10) images may have curl installation issues that
# conflict with the curl-minimal package.
#
PACKAGE_INSTALL_ADDITIONAL_OPTIONS="--allowerasing"
INSTALL_PACKAGES="clang-tools-extra curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-17-openjdk-headless jq libxml2-devel mailcap git automake make openssl openssl-devel attr diffutils curl python3 procps unzip xz https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm"
INSTALL_CHECKER_PKGS="cppcheck"
INSTALL_CHECKER_PKG_OPTIONS="--enablerepo=epel"
INSTALL_PACKAGES=(
attr
automake
curl
curl-devel
diffutils
fuse
fuse-devel
gcc
gcc-c++
git
glibc-langpack-en
java-21-openjdk-headless
jq
libstdc++-devel
libxml2-devel
mailcap
make
openssl
openssl-devel
perl-Test-Harness
procps
xz
)
# [NOTE]
# For RockyLinux, ShellCheck is downloaded from the github archive and installed.
#
SHELLCHECK_DIRECT_INSTALL=1
CURL_DIRECT_INSTALL=1
elif [ "${CONTAINER_FULLNAME}" = "rockylinux:8" ]; then
elif [ "${CONTAINER_FULLNAME}" = "rockylinux/rockylinux:8" ]; then
PACKAGE_MANAGER_BIN="dnf"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
INSTALL_PACKAGES="clang-tools-extra curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-17-openjdk-headless jq libxml2-devel mailcap git automake make openssl openssl-devel attr diffutils curl python3 unzip"
INSTALL_CHECKER_PKGS="cppcheck"
INSTALL_CHECKER_PKG_OPTIONS="--enablerepo=powertools"
INSTALL_PACKAGES=(
attr
automake
curl
curl-devel
diffutils
fuse
fuse-devel
gcc
gcc-c++
git
glibc-langpack-en
java-21-openjdk-headless
jq
libstdc++-devel
libxml2-devel
mailcap
make
openssl
openssl-devel
perl-Test-Harness
)
# [NOTE]
# For RockyLinux, ShellCheck is downloaded from the github archive and installed.
#
SHELLCHECK_DIRECT_INSTALL=1
CURL_DIRECT_INSTALL=1
elif [ "${CONTAINER_FULLNAME}" = "centos:centos7" ]; then
PACKAGE_MANAGER_BIN="yum"
PACKAGE_UPDATE_OPTIONS="update -y"
PACKAGE_INSTALL_OPTIONS="install -y"
# [NOTE]
# ShellCheck version(0.3.8) is too low to check.
# And in this version, it cannot be passed due to following error.
# "shellcheck: ./test/integration-test-main.sh: hGetContents: invalid argument (invalid byte sequence)"
#
INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel llvm-toolset-7-clang-tools-extra gcc-c++ glibc-langpack-en java-11-openjdk-headless libxml2-devel mailcap git automake make openssl openssl-devel attr curl python3 epel-release unzip"
INSTALL_CHECKER_PKGS="cppcheck jq"
INSTALL_CHECKER_PKG_OPTIONS="--enablerepo=epel"
elif [ "${CONTAINER_FULLNAME}" = "fedora:39" ]; then
elif [ "${CONTAINER_FULLNAME}" = "fedora:43" ] ||
[ "${CONTAINER_FULLNAME}" = "fedora:42" ]; then
PACKAGE_MANAGER_BIN="dnf"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
INSTALL_PACKAGES="clang-tools-extra curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-latest-openjdk-headless jq libxml2-devel mailcap git automake make openssl openssl-devel curl attr diffutils procps python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck ShellCheck"
INSTALL_CHECKER_PKG_OPTIONS=""
INSTALL_PACKAGES=(
attr
automake
curl
curl-devel
diffutils
fuse
fuse-devel
gawk
gcc
gcc-c++
git
glibc-langpack-en
java-latest-openjdk-headless
jq
libstdc++-devel
libxml2-devel
mailcap
make
openssl
openssl-devel
perl-Test-Harness
procps
)
elif [ "${CONTAINER_FULLNAME}" = "fedora:38" ]; then
PACKAGE_MANAGER_BIN="dnf"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
INSTALL_PACKAGES="clang-tools-extra curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-latest-openjdk-headless jq libxml2-devel mailcap git automake make openssl openssl-devel curl attr diffutils procps python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck ShellCheck"
INSTALL_CHECKER_PKG_OPTIONS=""
elif [ "${CONTAINER_FULLNAME}" = "opensuse/leap:15" ]; then
elif [ "${CONTAINER_FULLNAME}" = "opensuse/leap:15" ] ||
[ "${CONTAINER_FULLNAME}" = "opensuse/leap:16.0" ]; then
PACKAGE_MANAGER_BIN="zypper"
PACKAGE_UPDATE_OPTIONS="refresh"
PACKAGE_INSTALL_OPTIONS="install -y"
INSTALL_PACKAGES="automake clang-tools curl-devel fuse fuse-devel gcc-c++ java-17-openjdk-headless jq libxml2-devel make openssl openssl-devel python3-pip curl attr ShellCheck unzip"
INSTALL_CHECKER_PKGS="cppcheck ShellCheck"
INSTALL_CHECKER_PKG_OPTIONS=""
INSTALL_PACKAGES=(
attr
automake
curl
curl-devel
diffutils
fuse
fuse-devel
gcc-c++
java-21-openjdk-headless
jq
libxml2-devel
make
openssl
openssl-devel
procps
python3
)
elif [ "${CONTAINER_FULLNAME}" = "alpine:3.18" ]; then
elif [ "${CONTAINER_FULLNAME}" = "alpine:3.22" ]; then
PACKAGE_MANAGER_BIN="apk"
PACKAGE_UPDATE_OPTIONS="update --no-progress"
PACKAGE_INSTALL_OPTIONS="add --no-progress --no-cache"
INSTALL_PACKAGES="bash clang-extra-tools curl g++ make automake autoconf libtool git curl-dev fuse-dev jq libxml2-dev openssl coreutils procps attr sed mailcap openjdk17 aws-cli"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""
AWSCLI_DIRECT_INSTALL=0
INSTALL_PACKAGES=(
attr
autoconf
automake
coreutils
curl
curl-dev
fuse-dev
g++
git
jq
libtool
libxml2-dev
mailcap
make
openjdk21
openssl
perl-test-harness-utils
procps
sed
)
else
echo "No container configured for: ${CONTAINER_FULLNAME}"
@@ -236,62 +360,48 @@ echo "${PRGNAME} [INFO] Updates."
/bin/sh -c "${PACKAGE_MANAGER_BIN} ${PACKAGE_UPDATE_OPTIONS}"
#
# Install packages ( with cppcheck )
# Install packages
#
echo "${PRGNAME} [INFO] Install packages."
/bin/sh -c "${PACKAGE_MANAGER_BIN} ${PACKAGE_ENABLE_REPO_OPTIONS} ${PACKAGE_INSTALL_OPTIONS} ${PACKAGE_INSTALL_ADDITIONAL_OPTIONS} ${INSTALL_PACKAGES}"
echo "${PRGNAME} [INFO] Install cppcheck package."
/bin/sh -c "${PACKAGE_MANAGER_BIN} ${INSTALL_CHECKER_PKG_OPTIONS} ${PACKAGE_INSTALL_OPTIONS} ${INSTALL_CHECKER_PKGS}"
#
# Install ShellCheck manually
#
if [ "${SHELLCHECK_DIRECT_INSTALL}" -eq 1 ]; then
echo "${PRGNAME} [INFO] Install shellcheck package from github archive."
if ! LATEST_SHELLCHECK_DOWNLOAD_URL=$(curl --silent --show-error https://api.github.com/repos/koalaman/shellcheck/releases/latest | jq -r '.assets[].browser_download_url | select(contains("linux.x86_64"))'); then
echo "Could not get shellcheck package url"
exit 1
fi
if ! curl -s -S -L -o /tmp/shellcheck.tar.xz "${LATEST_SHELLCHECK_DOWNLOAD_URL}"; then
echo "Failed to download shellcheck package from ${LATEST_SHELLCHECK_DOWNLOAD_URL}"
exit 1
fi
if ! tar -C /usr/bin/ -xf /tmp/shellcheck.tar.xz --no-anchored 'shellcheck' --strip=1; then
echo "Failed to extract and install shellcheck."
rm -f /tmp/shellcheck.tar.xz
exit 1
fi
rm -f /tmp/shellcheck.tar.xz
fi
/bin/sh -c "${PACKAGE_MANAGER_BIN} ${PACKAGE_ENABLE_REPO_OPTIONS} ${PACKAGE_INSTALL_OPTIONS} ${PACKAGE_INSTALL_ADDITIONAL_OPTIONS} ${INSTALL_PACKAGES[*]}"
# Check Java version
java -version
#
# Install awscli
#
if [ "${AWSCLI_DIRECT_INSTALL}" -eq 1 ]; then
echo "${PRGNAME} [INFO] Install awscli2 package."
# Install newer curl for older distributions
if [ "${CURL_DIRECT_INSTALL}" -eq 1 ]; then
echo "${PRGNAME} [INFO] Install newer curl package."
CURRENT_DIR=$(pwd)
cd /tmp || exit 1
curl --fail --location --silent --output "/tmp/curl" "${CURL_DIRECT_URL}"
case "$(uname -m)" in
x86_64) curl_hash="$CURL_HASH_X86_64" ;;
aarch64) curl_hash="$CURL_HASH_AARCH64" ;;
*) exit 1 ;;
esac
echo "$curl_hash" "/tmp/curl" | sha256sum --check
mv "/tmp/curl" "/usr/local/bin/curl"
chmod +x "/usr/local/bin/curl"
curl "${AWSCLI_URI}" -o "${AWSCLI_ZIP_FILE}"
unzip "${AWSCLI_ZIP_FILE}"
./aws/install
cd "${CURRENT_DIR}" || exit 1
# Rocky Linux 8 and 9 have a different certificate path
if [ ! -f /etc/ssl/certs/ca-certificates.crt ]; then
ln -s /etc/pki/tls/certs/ca-bundle.crt /etc/ssl/certs/ca-certificates.crt
fi
fi
# Check curl version
curl --version
#-----------------------------------------------------------
# Set environment for configure
#-----------------------------------------------------------
echo "${PRGNAME} [INFO] Set environment for configure options"
echo "CXXFLAGS=${CXXFLAGS}" >> "${GITHUB_ENV}"
echo "CONFIGURE_OPTIONS=${CONFIGURE_OPTIONS}" >> "${GITHUB_ENV}"
cat << EOF >> "${GITHUB_ENV}"
CXX=${CXX}
CXXFLAGS=${CXXFLAGS}
LDFLAGS=${LDFLAGS}
CONFIGURE_OPTIONS=${CONFIGURE_OPTIONS}
EOF
echo "${PRGNAME} [INFO] Finish Linux helper for installing packages."

.gitignore

@@ -82,8 +82,11 @@ src/s3fs
src/test_curl_util
src/test_page_list
src/test_string_util
test/chaos-http-proxy-*
test/junk_data
test/pjdfstest
test/pjd-pjdfstest-*
test/s3proxy-*
test/write_multiblock
test/mknod_test


@@ -6,7 +6,9 @@ If you want specific instructions for some distributions, check the [wiki](https
Keep in mind using the pre-built packages when available.
1. Ensure your system satisfies build and runtime dependencies for:
## Compilation on Linux
### Ensure your system satisfies build and runtime dependencies for:
* fuse >= 2.8.4
* automake
@@ -14,7 +16,10 @@ Keep in mind using the pre-built packages when available.
* make
* libcurl
* libxml2
* openssl
* openssl/gnutls/nss
* Please prepare the library according to the OS on which you will compile.
* It is necessary to match the library used by libcurl.
* Install the OpenSSL, GnuTLS or NSS devel package.
* mime.types (the providing package depends on the OS)
* s3fs tries to detect `/etc/mime.types` as the default, regardless of the OS
* Otherwise, s3fs tries to detect `/etc/apache2/mime.types` if the OS is macOS
@ -22,26 +27,48 @@ Keep in mind using the pre-built packages when available.
* Alternatively, you can set the mime.types file path with the `mime` option instead of detecting these default files
* pkg-config (or your OS equivalent)
2. Then compile from master via the following commands:
* NOTE
For details of the required packages, or if you have any trouble with them, see `INSTALL_PACKAGES` in [linux-ci-helper.sh](https://github.com/s3fs-fuse/s3fs-fuse/blob/master/.github/workflows/linux-ci-helper.sh).
### Then compile from master via the following commands:
1. Clone the source code:
```sh
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
```
2. Configuration:
```sh
cd s3fs-fuse
./autogen.sh
./configure
```
Depending on the TLS library (OpenSSL/GnuTLS/NSS), add `--with-openssl`, `--with-gnutls` or `--with-nss` when executing `configure`. (If omitted, it is equivalent to `--with-openssl`.)
3. Building:
```sh
make
```
4. Installing:
```sh
sudo make install
```
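For example, a minimal sketch of building against GnuTLS instead of the default OpenSSL (assuming the GnuTLS development package is already installed):
```sh
# Select the TLS backend explicitly; omitting the flag is equivalent to --with-openssl.
./autogen.sh
./configure --with-gnutls
make
```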
### NOTE - The libraries/components required to run s3fs are:
* fuse >= 2.8.4
* libcurl
* libxml2
* openssl/gnutls/nss
* mime.types (the package providing depends on the OS)
```
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
cd s3fs-fuse
./autogen.sh
./configure
make
sudo make install
```
## Compilation on Windows (using MSYS2)
On Windows, use [MSYS2](https://www.msys2.org/) to compile s3fs natively.
1. Install [WinFsp](https://github.com/billziss-gh/winfsp) to your machine.
1. Install [WinFsp](https://github.com/billziss-gh/winfsp) to your machine. Note that it should be installed in developer mode so that the header files are included.
2. Install dependencies onto MSYS2:
```sh
pacman -S git autoconf automake gcc make pkg-config libopenssl-devel libcurl-devel libxml2-devel libzstd-devel
pacman -S git autoconf automake gcc make pkg-config openssl-devel libcurl-devel libxml2-devel libzstd-devel
```
3. Clone this repository, then change directory into the cloned one.
@ -74,7 +101,7 @@ On Windows, use [MSYS2](https://www.msys2.org/) to compile for itself.
```sh
./autogen.sh
PKG_CONFIG_PATH="$PKG_CONFIG_PATH:$(pwd)" ./configure
make
make CXXFLAGS="-I/usr/include"
```
7. Copy the binary files to distribute into one place:

ChangeLog

@ -1,6 +1,16 @@
ChangeLog for S3FS
------------------
Version 1.95 -- 25 Oct, 2024 (major changes only)
#2424 - Add ipresolve option to select IPv4- or IPv6-only
#2443 - Retry request on HTTP 429 error
#2448 - Changed s3fs logo
#2455 - Fix deadlock in FdManager::ChangeEntityToTempPath
#2487 - #2492 - #2493 - Enable static lock checking and fix locking errors
#2506 - #2517 - Fix Windows compilation
#2515 - Fix FreeBSD support
#2532 - Fix use-after-free in FdManager::ChangeEntityToTempPath
Version 1.94 -- 23 Feb, 2024 (major changes only)
#2409 - Fixed a bug that mounting with ksmid specified to fail
#2404 - Fixed ordering problem between fdatasync and flush
@ -535,7 +545,7 @@ issue #3 - Fixed local timezone was incorrectly being applied to IAM and Last-Mo
issue #4 - Fix compilation error on MacOSX with missing const
Version 1.74 -- Nov 24, 2013
This version is initial version on Github, same as on GoogleCodes(s3fs).
This version is initial version on GitHub, same as on GoogleCodes(s3fs).
https://github.com/s3fs-fuse/s3fs-fuse/releases/tag/v1.74
see more detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.74.tar.gz

Makefile.am

@ -31,10 +31,14 @@ release : dist ../utils/release.sh
.PHONY: cppcheck shellcheck
clang-tidy:
make -C src/ clang-tidy
make -C test/ clang-tidy
cppcheck:
cppcheck --quiet --error-exitcode=1 \
--inline-suppr \
--std=c++11 \
--std=@CPP_VERSION@ \
--xml \
-D HAVE_ATTR_XATTR_H \
-D HAVE_SYS_EXTATTR_H \
@ -46,6 +50,8 @@ cppcheck:
--suppress=unmatchedSuppression \
--suppress=useStlAlgorithm \
--suppress=checkLevelNormal \
--suppress=normalCheckLevelMaxBranches \
--addon=test/map-subscript-read.py \
src/ test/
#
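For reference, a sketch of invoking these static-check targets from the top of the source tree (assuming `./autogen.sh` and `./configure` have already been run so that the Makefile is generated):
```sh
# Run the static analysis targets defined in the top-level Makefile.
make cppcheck
make clang-tidy
```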

README.md

@ -4,9 +4,11 @@ s3fs allows Linux, macOS, and FreeBSD to mount an S3 bucket via [FUSE(Filesystem
s3fs lets you operate on files and directories in an S3 bucket like a local file system.
s3fs preserves the native object format for files, allowing use of other tools like [AWS CLI](https://github.com/aws/aws-cli).
[![s3fs-fuse CI](https://github.com/s3fs-fuse/s3fs-fuse/workflows/s3fs-fuse%20CI/badge.svg)](https://github.com/s3fs-fuse/s3fs-fuse/actions)
[![s3fs-fuse CI](https://github.com/s3fs-fuse/s3fs-fuse/actions/workflows/ci.yml/badge.svg)](https://github.com/s3fs-fuse/s3fs-fuse/actions/workflows/ci.yml)
[![Twitter Follow](https://img.shields.io/twitter/follow/s3fsfuse.svg?style=social&label=Follow)](https://twitter.com/s3fsfuse)
![s3fs-fuse](https://github.com/ggtakec/s3fs-fuse-images/blob/master/images/s3fslogo.png)
## Features
* large subset of POSIX including reading/writing files, directories, symlinks, mode, uid/gid, and extended attributes

autogen.sh

@ -21,11 +21,15 @@
echo "--- Make commit hash file -------"
SHORTHASH="unknown"
SHORTHASH=""
if command -v git > /dev/null 2>&1 && test -d .git; then
if RESULT=$(git rev-parse --short HEAD); then
SHORTHASH="${RESULT}"
fi
if SHORTHASH=$(git rev-parse --short HEAD); then
echo " -> Git commit hash : ${SHORTHASH}"
else
echo " -> Not get git commit hash"
fi
else
echo " -> Not found git command or .git directory"
fi
echo "${SHORTHASH}" > default_commit_hash

configure.ac

@ -20,7 +20,7 @@
dnl Process this file with autoconf to produce a configure script.
AC_PREREQ([2.69])
AC_INIT([s3fs],[1.94])
AC_INIT([s3fs],[1.95])
AC_CONFIG_HEADER([config.h])
AC_CANONICAL_TARGET
@ -34,7 +34,10 @@ AC_CHECK_HEADERS([attr/xattr.h])
AC_CHECK_HEADERS([sys/extattr.h])
AC_CHECK_FUNCS([fallocate])
CXXFLAGS="-Wall -fno-exceptions -D_FILE_OFFSET_BITS=64 -D_FORTIFY_SOURCE=3 -std=c++11 $CXXFLAGS"
CPP_VERSION=c++14
AC_SUBST([CPP_VERSION])
CXXFLAGS="-Wall -fno-exceptions -D_FILE_OFFSET_BITS=64 -D_FORTIFY_SOURCE=3 -std=$CPP_VERSION $CXXFLAGS"
dnl ----------------------------------------------
dnl For macOS
@ -367,11 +370,22 @@ AS_IF([test -d .git], [DOTGITDIR=yes], [DOTGITDIR=no])
AC_MSG_CHECKING([github short commit hash])
if test "x${GITCMD}" = "xyes" -a "x${DOTGITDIR}" = "xyes"; then
GITCOMMITHASH=`git rev-parse --short HEAD`
TMP_GITCOMMITHASH=`git rev-parse --short HEAD`
UNTRACKED_FILES=`git status -s --untracked-files=no`
if test -n "${UNTRACKED_FILES}"; then
GITCOMMITHASH="(commit:${TMP_GITCOMMITHASH} +untracked files)"
else
GITCOMMITHASH="(commit:${TMP_GITCOMMITHASH})"
fi
elif test -f default_commit_hash; then
GITCOMMITHASH=`cat default_commit_hash`
TMP_GITCOMMITHASH=`cat default_commit_hash`
if test -n "${TMP_GITCOMMITHASH}"; then
GITCOMMITHASH="(base commit:${TMP_GITCOMMITHASH})"
else
GITCOMMITHASH=""
fi
else
GITCOMMITHASH="unknown"
GITCOMMITHASH=""
fi
AC_MSG_RESULT([${GITCOMMITHASH}])

doc/man/s3fs.1

@ -168,10 +168,16 @@ specify expire time (seconds) for entries in the stat cache and symbolic link ca
specify expire time (seconds) for entries in the stat cache and symbolic link cache. This expire time is based on the time from the last access time of those cache.
This option is exclusive with stat_cache_expire, and is left for compatibility with older versions.
.TP
\fB\-o\fR disable_noobj_cache (default is enable)
By default s3fs memorizes when an object does not exist up until the stat cache timeout.
This caching can cause staleness for applications.
If disabled, s3fs will not memorize objects and may cause extra HeadObject requests and reduce performance.
\fB\-o\fR enable_negative_cache (negative cache is enabled by default)
This option keeps the non-existence of objects in the stat cache.
While this negative cache is enabled, s3fs does not issue extra HeadObject requests to look up non-existent objects, which improves performance.
This feature is enabled by default, so there is normally no need to specify it.
.TP
\fB\-o\fR disable_negative_cache (negative cache is enabled by default)
By default, s3fs keeps non-existent objects in the stat cache.
This option disables this negative caching.
This prevents delays in updates due to cache retention.
However, it may increase the number of HeadObject requests needed to check whether an object exists, which may decrease performance.
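As an illustration only (the bucket name and mount point below are hypothetical), the negative cache could be disabled at mount time like this:
```sh
# Hypothetical example: mount with the negative (non-existent object) cache turned off.
s3fs mybucket /mnt/s3 -o passwd_file=${HOME}/.passwd-s3fs -o disable_negative_cache
```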
.TP
\fB\-o\fR no_check_certificate (by default this option is disabled)
server certificate won't be checked against the available certificate authorities.
@ -179,21 +185,24 @@ server certificate won't be checked against the available certificate authoritie
\fB\-o\fR ssl_verify_hostname (default="2")
When 0, do not verify the SSL certificate against the hostname.
.TP
\fB\-o\fR ssl_client_cert (default="")
Specify an SSL client certificate.
Specify this optional parameter in the following format:
"<SSL Cert>[:<Cert Type>[:<Private Key>[:<Key Type>
[:<Password>]]]]"
<SSL Cert>: Client certificate.
Specify the file path or NickName(for NSS, etc.).
<Cert Type>: Type of certificate, default is "PEM"(optional).
<Private Key>: Certificate's private key file(optional).
<Key Type>: Type of private key, default is "PEM"(optional).
<Password>: Passphrase of the private key(optional). It is also possible to omit this value and specify it using the environment variable "S3FS_SSL_PRIVKEY_PASSWORD".
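A hedged sketch of this format, assuming a PEM certificate and key (all paths, the bucket name, and the mount point are illustrative only):
```sh
# Hypothetical paths; the key passphrase could instead come from S3FS_SSL_PRIVKEY_PASSWORD.
s3fs mybucket /mnt/s3 -o ssl_client_cert="/etc/pki/client.pem:PEM:/etc/pki/client.key:PEM"
```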
.TP
\fB\-o\fR nodnscache - disable DNS cache.
s3fs always uses a DNS cache; this option disables it.
.TP
\fB\-o\fR nosscache - disable SSL session cache.
s3fs always uses an SSL session cache; this option disables it.
.TP
\fB\-o\fR multireq_max (default="20")
maximum number of parallel request for listing objects.
.TP
\fB\-o\fR parallel_count (default="5")
number of parallel request for uploading big objects.
s3fs uploads large object (over 25MB by default) by multipart post request, and sends parallel requests.
This option limits parallel request count which s3fs requests at once.
It is necessary to set this value depending on a CPU and a network band.
.TP
\fB\-o\fR multipart_size (default="10")
part size, in MB, for each multipart request.
The minimum value is 5 MB and the maximum value is 5 GB.
@ -252,12 +261,13 @@ Set a service path when the non-Amazon host requires a prefix.
sets the url to use to access Amazon S3. If you want to use HTTP, then you can set "url=http://s3.amazonaws.com".
If you do not use https, please specify the URL with the url option.
.TP
\fB\-o\fR endpoint (default="us-east-1")
sets the endpoint to use on signature version 4.
\fB\-o\fR region (default="us-east-1")
sets the region to use on signature version 4.
If this option is not specified, s3fs uses the "us-east-1" region as the default.
If s3fs cannot connect to the region specified by this option, s3fs cannot run.
However, if you do not specify this option and the default region cannot be reached, s3fs automatically retries against the correct region, which it learns from the error response returned by the S3 server.
You can also specify the legacy -o endpoint option, which means the same thing.
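For example (the bucket name, mount point, and region below are illustrative only):
```sh
# Hypothetical example: mount a bucket that resides in eu-west-1.
s3fs mybucket /mnt/s3 -o region=eu-west-1
```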
.TP
\fB\-o\fR sigv2 (default is signature version 4 falling back to version 2)
sets signing AWS requests by using only signature version 2.
@ -284,10 +294,9 @@ If this option is enabled, a sequential upload will be performed in parallel wit
This is expected to give better performance than other upload functions.
Note that this option is still experimental and may change in the future.
.TP
\fB\-o\fR max_thread_count (default is "5")
Specifies the number of threads waiting for stream uploads.
Note that this option and Streamm Upload are still experimental and subject to change in the future.
This option will be merged with "parallel_count" in the future.
\fB\-o\fR max_thread_count (default is "10")
This value is the maximum number of parallel requests sent at one time, and controls the degree of parallelism for head requests, multipart uploads and stream uploads.
Worker threads are started to process requests according to this value.
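For example (the bucket name, mount point, and value below are illustrative only):
```sh
# Hypothetical example: raise the worker thread limit from the default of 10 to 20.
s3fs mybucket /mnt/s3 -o max_thread_count=20
```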
.TP
\fB\-o\fR enable_content_md5 (default is disable)
Allow S3 server to check data integrity of uploads via the Content-MD5 header.
@ -418,6 +427,12 @@ Username and passphrase are valid only for HTTP schema.
If the HTTP proxy does not require authentication, this option is not required.
Separate the username and passphrase with a ':' character and specify each as a URL-encoded string.
.TP
\fB\-o\fR ipresolve (default="whatever")
Selects which type of IP addresses to use when establishing a connection.
The default ('whatever') allows addresses of any IP version (IPv4 and IPv6) that your system supports.
If 'IPv4' is specified, only IPv4 addresses are used; if 'IPv6' is specified, only IPv6 addresses are used.
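For example (the bucket name and mount point are illustrative only):
```sh
# Hypothetical example: restrict connections to IPv4 addresses only.
s3fs mybucket /mnt/s3 -o ipresolve=IPv4
```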
.TP
\fB\-o\fR logfile - specify the log output file.
s3fs outputs the log file to syslog. Alternatively, if s3fs is started with the "-f" option specified, the log will be output to the stdout/stderr.
You can use this option to specify the log file that s3fs outputs.
@ -504,15 +519,15 @@ s3fs is a multi-threaded application. Depending on the workload it may use multi
.TP
.SS Performance of S3 requests
.TP
s3fs provides several options (e.g. "\-o multipart_size", "\-o parallel_count") to control behaviour and thus indirectly the performance. The possible combinations of these options in conjunction with the various S3 backends are so varied that there is no individual recommendation other than the default values. Improved individual settings can be found by testing and measuring.
s3fs provides several options (e.g. "max_thread_count" option) to control behaviour and thus indirectly the performance. The possible combinations of these options in conjunction with the various S3 backends are so varied that there is no individual recommendation other than the default values. Improved individual settings can be found by testing and measuring.
.TP
The two options "Enable no object cache" ("\-o enable_noobj_cache") and "Disable support of alternative directory names" ("\-o notsup_compat_dir") can be used to control shared access to the same bucket by different applications:
The two options "Enable negative cache" ("\-o enable_negative_cache") and "Disable support of alternative directory names" ("\-o notsup_compat_dir") can be used to control shared access to the same bucket by different applications:
.TP
.IP \[bu]
Enable no object cache ("\-o enable_noobj_cache")
Enable negative cache ("\-o enable_negative_cache")
.RS
.TP
If a bucket is used exclusively by an s3fs instance, you can enable the cache for non-existent files and directories with "\-o enable_noobj_cache". This eliminates repeated requests to check the existence of an object, saving time and possibly money.
If a bucket is used exclusively by an s3fs instance, you can enable the cache for non-existent files and directories with "\-o enable_negative_cache". This eliminates repeated requests to check the existence of an object, saving time and possibly money.
.RE
.IP \[bu]
Enable support of alternative directory names ("\-o compat_dir")

(A binary image file was changed; the previous version was 5.3 KiB. The image itself is not shown.)

src/Makefile.am

@ -36,14 +36,15 @@ s3fs_SOURCES = \
metaheader.cpp \
mpu_util.cpp \
curl.cpp \
curl_handlerpool.cpp \
curl_multi.cpp \
curl_share.cpp \
curl_util.cpp \
s3objlist.cpp \
cache.cpp \
cache_node.cpp \
string_util.cpp \
s3fs_cred.cpp \
s3fs_util.cpp \
s3fs_threadreqs.cpp \
fdcache.cpp \
fdcache_entity.cpp \
fdcache_page.cpp \
@ -52,10 +53,11 @@ s3fs_SOURCES = \
fdcache_fdinfo.cpp \
fdcache_pseudofd.cpp \
fdcache_untreated.cpp \
filetimes.cpp \
addhead.cpp \
sighandlers.cpp \
autolock.cpp \
threadpoolman.cpp \
syncfiller.cpp \
common_auth.cpp
if USE_SSL_OPENSSL
s3fs_SOURCES += openssl_auth.cpp
@ -102,7 +104,9 @@ TESTS = \
test_string_util
clang-tidy:
clang-tidy $(s3fs_SOURCES) -- $(DEPS_CFLAGS) $(CPPFLAGS)
clang-tidy -extra-arg-before=-xc++ -extra-arg=-std=@CPP_VERSION@ \
*.h $(s3fs_SOURCES) test_curl_util.cpp test_page_list.cpp test_string_util.cpp \
-- $(DEPS_CFLAGS) $(CPPFLAGS)
#
# Local variables:

src/addhead.cpp

@ -23,7 +23,9 @@
#include <cstring>
#include <sstream>
#include <fstream>
#include <string>
#include <strings.h>
#include <utility>
#include <vector>
#include "s3fs.h"
@ -119,7 +121,7 @@ bool AdditionalHeader::Load(const char* file)
key.erase(0, strlen(ADD_HEAD_REGEX));
// compile
std::unique_ptr<regex_t> preg(new regex_t);
RegexPtr preg(new regex_t, regfree);
int result;
if(0 != (result = regcomp(preg.get(), key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info
char errbuf[256];
@ -131,7 +133,7 @@ bool AdditionalHeader::Load(const char* file)
addheadlist.emplace_back(std::move(preg), key, head, value);
}else{
// not regex, directly comparing
addheadlist.emplace_back(nullptr, key, head, value);
addheadlist.emplace_back(RegexPtr(nullptr, regfree), key, head, value);
}
// set flag
@ -164,7 +166,7 @@ bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const
// [NOTE]
// Because to allow duplicate key, and then scanning the entire table.
//
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
for(auto iter = addheadlist.cbegin(); iter != addheadlist.cend(); ++iter){
const add_header *paddhead = &*iter;
if(paddhead->pregex){
@ -194,12 +196,11 @@ struct curl_slist* AdditionalHeader::AddHeader(struct curl_slist* list, const ch
if(!AddHeader(meta, path)){
return list;
}
for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
for(auto iter = meta.cbegin(); iter != meta.cend(); ++iter){
// Adding header
list = curl_slist_sort_insert(list, iter->first.c_str(), iter->second.c_str());
}
meta.clear();
S3FS_MALLOCTRIM(0);
return list;
}
@ -214,7 +215,7 @@ bool AdditionalHeader::Dump() const
ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << std::endl;
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter, ++cnt){
for(auto iter = addheadlist.cbegin(); iter != addheadlist.cend(); ++iter, ++cnt){
const add_header *paddhead = &*iter;
ssdbg << " [" << cnt << "] = {" << std::endl;

src/addhead.h

@ -23,6 +23,8 @@
#include <memory>
#include <regex.h>
#include <string>
#include <utility>
#include <vector>
#include "metaheader.h"
@ -30,25 +32,22 @@
//----------------------------------------------
// Structure / Typedef
//----------------------------------------------
typedef std::unique_ptr<regex_t, decltype(&regfree)> RegexPtr;
struct add_header{
add_header(std::unique_ptr<regex_t> pregex, std::string basestring, std::string headkey, std::string headvalue)
add_header(RegexPtr pregex, std::string basestring, std::string headkey, std::string headvalue)
: pregex(std::move(pregex))
, basestring(std::move(basestring))
, headkey(std::move(headkey))
, headvalue(std::move(headvalue))
{}
~add_header() {
if(pregex){
regfree(pregex.get());
}
}
add_header(const add_header&) = delete;
add_header(add_header&& val) = default;
add_header& operator=(const add_header&) = delete;
add_header& operator=(add_header&&) = delete;
std::unique_ptr<regex_t> pregex; // not nullptr means using regex, nullptr means comparing suffix directly.
RegexPtr pregex; // not nullptr means using regex, nullptr means comparing suffix directly.
std::string basestring;
std::string headkey;
std::string headvalue;
@ -69,12 +68,13 @@ class AdditionalHeader
protected:
AdditionalHeader();
~AdditionalHeader();
public:
AdditionalHeader(const AdditionalHeader&) = delete;
AdditionalHeader(AdditionalHeader&&) = delete;
AdditionalHeader& operator=(const AdditionalHeader&) = delete;
AdditionalHeader& operator=(AdditionalHeader&&) = delete;
public:
// Reference singleton
static AdditionalHeader* get() { return &singleton; }

src/autolock.h

@ -1,63 +0,0 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_AUTOLOCK_H_
#define S3FS_AUTOLOCK_H_
#include <pthread.h>
//-------------------------------------------------------------------
// AutoLock Class
//-------------------------------------------------------------------
class AutoLock
{
public:
enum Type {
NO_WAIT = 1,
ALREADY_LOCKED = 2,
NONE = 0
};
private:
pthread_mutex_t* const auto_mutex;
bool is_lock_acquired;
private:
AutoLock(const AutoLock&) = delete;
AutoLock(AutoLock&&) = delete;
AutoLock& operator=(const AutoLock&) = delete;
AutoLock& operator=(AutoLock&&) = delete;
public:
explicit AutoLock(pthread_mutex_t* pmutex, Type type = NONE);
~AutoLock();
bool isLockAcquired() const;
};
#endif // S3FS_AUTOLOCK_H_
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/

File diff suppressed because it is too large.

src/cache.h

@ -22,58 +22,16 @@
#define S3FS_CACHE_H_
#include <cstring>
#include <map>
#include <mutex>
#include <string>
#include <sys/stat.h>
#include <vector>
#include "autolock.h"
#include "common.h"
#include "metaheader.h"
//-------------------------------------------------------------------
// Structure
//-------------------------------------------------------------------
//
// Struct for stats cache
//
struct stat_cache_entry {
struct stat stbuf;
unsigned long hit_count;
struct timespec cache_date;
headers_t meta;
bool isforce;
bool noobjcache; // Flag: cache is no object for no listing.
unsigned long notruncate; // 0<: not remove automatically at checking truncate
stat_cache_entry() : hit_count(0), isforce(false), noobjcache(false), notruncate(0L)
{
memset(&stbuf, 0, sizeof(struct stat));
cache_date.tv_sec = 0;
cache_date.tv_nsec = 0;
meta.clear();
}
};
typedef std::map<std::string, stat_cache_entry> stat_cache_t; // key=path
//
// Struct for symbolic link cache
//
struct symlink_cache_entry {
std::string link;
unsigned long hit_count;
struct timespec cache_date; // The function that operates timespec uses the same as Stats
symlink_cache_entry() : link(""), hit_count(0)
{
cache_date.tv_sec = 0;
cache_date.tv_nsec = 0;
}
};
typedef std::map<std::string, symlink_cache_entry> symlink_cache_t;
//
// Typedefs for No truncate file name cache
//
typedef std::vector<std::string> notruncate_filelist_t; // untruncated file name list in dir
typedef std::map<std::string, notruncate_filelist_t> notruncate_dir_map_t; // key is parent dir path
#include "s3objlist.h"
#include "cache_node.h"
//-------------------------------------------------------------------
// Class StatCache
@ -91,31 +49,26 @@ class StatCache
{
private:
static StatCache singleton;
static pthread_mutex_t stat_cache_lock;
stat_cache_t stat_cache;
bool IsExpireTime;
bool IsExpireIntervalType; // if this flag is true, cache data is updated at last access time.
time_t ExpireTime;
unsigned long CacheSize;
bool IsCacheNoObject;
symlink_cache_t symlink_cache;
notruncate_dir_map_t notruncate_file_cache;
static std::mutex stat_cache_lock;
std::shared_ptr<DirStatCache> pMountPointDir GUARDED_BY(stat_cache_lock); // Top directory = Mount point
unsigned long CacheSize;
private:
StatCache();
~StatCache();
void Clear();
bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce);
// Truncate stat cache
bool TruncateCache(AutoLock::Type locktype = AutoLock::NONE);
// Truncate symbolic link cache
bool TruncateSymlink(AutoLock::Type locktype = AutoLock::NONE);
bool AddNotruncateCache(const std::string& key);
bool DelNotruncateCache(const std::string& key);
bool AddStatHasLock(const std::string& key, const struct stat* pstbuf, const headers_t* pmeta, objtype_t type, bool notruncate) REQUIRES(StatCache::stat_cache_lock);
bool TruncateCacheHasLock(bool check_only_oversize_case = true) REQUIRES(StatCache::stat_cache_lock);
bool DelStatHasLock(const std::string& key) REQUIRES(StatCache::stat_cache_lock);
bool RawGetChildStats(const std::string& dir, s3obj_list_t* plist, s3obj_type_map_t* pobjmap);
public:
StatCache(const StatCache&) = delete;
StatCache(StatCache&&) = delete;
StatCache& operator=(const StatCache&) = delete;
StatCache& operator=(StatCache&&) = delete;
// Reference singleton
static StatCache* getStatCacheData()
{
@ -125,83 +78,56 @@ class StatCache
// Attribute
unsigned long GetCacheSize() const;
unsigned long SetCacheSize(unsigned long size);
time_t GetExpireTime() const;
time_t SetExpireTime(time_t expire, bool is_interval = false);
time_t UnsetExpireTime();
bool SetCacheNoObject(bool flag);
bool EnableCacheNoObject()
{
return SetCacheNoObject(true);
}
bool DisableCacheNoObject()
{
return SetCacheNoObject(false);
}
bool GetCacheNoObject() const
{
return IsCacheNoObject;
}
// Get stat cache
bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck = true, bool* pisforce = nullptr)
bool GetStat(const std::string& key, struct stat* pstbuf, headers_t* pmeta, objtype_t* ptype, const char* petag = nullptr);
bool GetStat(const std::string& key, struct stat* pstbuf, headers_t* pmeta)
{
return GetStat(key, pst, meta, overcheck, nullptr, pisforce);
return GetStat(key, pstbuf, pmeta, nullptr, nullptr);
}
bool GetStat(const std::string& key, struct stat* pst, bool overcheck = true)
bool GetStat(const std::string& key, struct stat* pstbuf, const char* petag)
{
return GetStat(key, pst, nullptr, overcheck, nullptr, nullptr);
return GetStat(key, pstbuf, nullptr, nullptr, petag);
}
bool GetStat(const std::string& key, headers_t* meta, bool overcheck = true)
bool GetStat(const std::string& key, struct stat* pstbuf)
{
return GetStat(key, nullptr, meta, overcheck, nullptr, nullptr);
return GetStat(key, pstbuf, nullptr, nullptr, nullptr);
}
bool HasStat(const std::string& key, bool overcheck = true)
bool GetStat(const std::string& key, headers_t* pmeta)
{
return GetStat(key, nullptr, nullptr, overcheck, nullptr, nullptr);
return GetStat(key, nullptr, pmeta, nullptr, nullptr);
}
bool HasStat(const std::string& key, const char* etag, bool overcheck = true)
bool HasStat(const std::string& key, const char* petag = nullptr)
{
return GetStat(key, nullptr, nullptr, overcheck, etag, nullptr);
return GetStat(key, nullptr, nullptr, nullptr, petag);
}
bool HasStat(const std::string& key, struct stat* pst, const char* etag)
{
return GetStat(key, pst, nullptr, true, etag, nullptr);
}
// Cache For no object
bool IsNoObjectCache(const std::string& key, bool overcheck = true);
bool AddNoObjectCache(const std::string& key);
// Add stat cache
bool AddStat(const std::string& key, const headers_t& meta, bool forcedir = false, bool no_truncate = false);
bool AddStat(const std::string& key, const struct stat& stbuf, const headers_t& meta, objtype_t type, bool notruncate = false);
bool AddStat(const std::string& key, const struct stat& stbuf, objtype_t type, bool notruncate = false);
bool AddNegativeStat(const std::string& key);
// Update meta stats
bool UpdateMetaStats(const std::string& key, const headers_t& meta);
bool UpdateStat(const std::string& key, const struct stat& stbuf, const headers_t& meta);
// Change no truncate flag
void ChangeNoTruncateFlag(const std::string& key, bool no_truncate);
void ClearNoTruncateFlag(const std::string& key);
// Delete stat cache
bool DelStat(const char* key, AutoLock::Type locktype = AutoLock::NONE);
bool DelStat(const std::string& key, AutoLock::Type locktype = AutoLock::NONE)
{
return DelStat(key.c_str(), locktype);
}
bool DelStat(const std::string& key);
// Cache for symbolic link
bool GetSymlink(const std::string& key, std::string& value);
bool AddSymlink(const std::string& key, const std::string& value);
bool DelSymlink(const char* key, AutoLock::Type locktype = AutoLock::NONE);
bool AddSymlink(const std::string& key, const struct stat& stbuf, const headers_t& meta, const std::string& value);
// Cache for Notruncate file
bool GetNotruncateCache(const std::string& parentdir, notruncate_filelist_t& list);
// Get List/Map
bool GetChildStatList(const std::string& dir, s3obj_list_t& list);
bool GetChildStatMap(const std::string& dir, s3obj_type_map_t& objmap);
// For debugging
void Dump(bool detail);
};
//-------------------------------------------------------------------
// Functions
//-------------------------------------------------------------------
bool convert_header_to_stat(const char* path, const headers_t& meta, struct stat* pst, bool forcedir = false);
#endif // S3FS_CACHE_H_
/*

src/cache_node.cpp (new file, 1454 lines; diff suppressed because it is too large)

src/cache_node.h (new file, 360 lines)

@ -0,0 +1,360 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_CACHE_NODE_H_
#define S3FS_CACHE_NODE_H_
#include <iosfwd>
#include <memory>
#include <mutex>
#include "common.h"
#include "metaheader.h"
#include "s3objlist.h"
#include "types.h"
//-------------------------------------------------------------------
// Utilities
//-------------------------------------------------------------------
#define MAX_STAT_CACHE_COUNTER 6
constexpr int stat_counter_pos(objtype_t type)
{
if(IS_FILE_OBJ(type)){
return 1;
}else if(IS_SYMLINK_OBJ(type)){
return 2;
}else if(IS_DIR_OBJ(type)){
return 3;
}else if(IS_NEGATIVE_OBJ(type)){
return 4;
}else{ // objtype_t::UNKNOWN and other
return 0;
}
}
//-------------------------------------------------------------------
// Base Class : StatCacheNode
//-------------------------------------------------------------------
class DirStatCache;
class StatCacheNode : public std::enable_shared_from_this<StatCacheNode>
{
// [NOTE]
// As an exception, declare friends to call some protected methods from
// DirStatCache::RemoveChildHasLock and AddHasLock methods.
//
friend class DirStatCache;
protected:
// Stat cache counter(see. stat_counter_pos())
// <position>
// 0 = total node count
// 1 = file node count
// 2 = symlink node count
// 3 = directory node count
// 4 = negative cache node count
//
static std::mutex counter_lock;
static unsigned long counter[MAX_STAT_CACHE_COUNTER] GUARDED_BY(counter_lock);
static bool EnableExpireTime;
static bool IsExpireIntervalType; // if this flag is true, cache data is updated at last access time.
static time_t ExpireTime;
static bool UseNegativeCache;
static std::mutex cache_lock; // for internal data
static unsigned long DisableCheckingExpire GUARDED_BY(cache_lock); // If greater than 0, it disables the expiration check, which allows disabling checks during processing.
static struct timespec DisableExpireDate GUARDED_BY(cache_lock); // Data registered after this time will not be truncated(if 0 < DisableCheckingExpire)
private:
objtype_t cache_type GUARDED_BY(StatCacheNode::cache_lock) = objtype_t::UNKNOWN; // object type is set in the constructor(except dir).
std::string fullpath GUARDED_BY(StatCacheNode::cache_lock); // full path(This value is set only when the object is created)
unsigned long hit_count GUARDED_BY(StatCacheNode::cache_lock) = 0L; // hit count
struct timespec cache_date GUARDED_BY(StatCacheNode::cache_lock) = {0, 0}; // registration/renewal time
bool notruncate GUARDED_BY(StatCacheNode::cache_lock) = false; // If true, not remove automatically at checking truncate.
bool has_stat GUARDED_BY(StatCacheNode::cache_lock) = false; // valid stat information flag (for case only path registration and no stat information)
struct stat stbuf GUARDED_BY(StatCacheNode::cache_lock) = {}; // stat data
bool has_meta GUARDED_BY(StatCacheNode::cache_lock) = false; // valid meta headers information flag (for case only path registration and no meta headers)
headers_t meta GUARDED_BY(StatCacheNode::cache_lock); // meta list
bool has_extval GUARDED_BY(StatCacheNode::cache_lock) = false; // valid extra value flag
std::string extvalue GUARDED_BY(StatCacheNode::cache_lock); // extra value for key(ex. used for symlink)
protected:
static void IncrementCacheCount(objtype_t type);
static void DecrementCacheCount(objtype_t type);
static bool SetNegativeCache(bool flag);
static bool NeedExpireCheckHasLock(const struct timespec& ts) REQUIRES(StatCacheNode::cache_lock);
// Cache Type
bool isSameObjectTypeHasLock(objtype_t type) const REQUIRES(StatCacheNode::cache_lock);
bool isDirectoryHasLock() const REQUIRES(StatCacheNode::cache_lock);
bool isFileHasLock() const REQUIRES(StatCacheNode::cache_lock);
bool isSymlinkHasLock() const REQUIRES(StatCacheNode::cache_lock);
bool isNegativeHasLock() const REQUIRES(StatCacheNode::cache_lock);
// Clear
virtual bool ClearDataHasLock() REQUIRES(StatCacheNode::cache_lock);
virtual bool ClearHasLock() REQUIRES(StatCacheNode::cache_lock);
virtual bool RemoveChildHasLock(const std::string& strpath) REQUIRES(StatCacheNode::cache_lock);
virtual bool isRemovableHasLock() REQUIRES(StatCacheNode::cache_lock);
// Add
virtual bool AddHasLock(const std::string& strpath, const struct stat* pstat, const headers_t* pmeta, objtype_t type, bool is_notruncate) REQUIRES(StatCacheNode::cache_lock);
// Update(Set)
bool UpdateHasLock(objtype_t type) REQUIRES(StatCacheNode::cache_lock);
virtual bool UpdateHasLock(const struct stat* pstat, const headers_t* pmeta, bool clear_meta) REQUIRES(StatCacheNode::cache_lock);
virtual bool UpdateHasLock(const struct stat* pstat, bool clear_meta) REQUIRES(StatCacheNode::cache_lock);
virtual bool UpdateHasLock(bool is_notruncate) REQUIRES(StatCacheNode::cache_lock);
virtual bool UpdateHasLock(const std::string* pextvalue) REQUIRES(StatCacheNode::cache_lock);
virtual bool UpdateHasLock() REQUIRES(StatCacheNode::cache_lock);
virtual bool SetHasLock(const struct stat& stbuf, const headers_t& meta, bool is_notruncate) REQUIRES(StatCacheNode::cache_lock);
// Get
objtype_t GetTypeHasLock() const REQUIRES(StatCacheNode::cache_lock);
const std::string& GetPathHasLock() const REQUIRES(StatCacheNode::cache_lock);
bool HasStatHasLock() const REQUIRES(StatCacheNode::cache_lock);
bool HasMetaHasLock() const REQUIRES(StatCacheNode::cache_lock);
bool GetNoTruncateHasLock() const REQUIRES(StatCacheNode::cache_lock);
virtual bool GetHasLock(headers_t* pmeta, struct stat* pst) REQUIRES(StatCacheNode::cache_lock);
virtual bool GetExtraHasLock(std::string& value) REQUIRES(StatCacheNode::cache_lock);
virtual s3obj_type_map_t::size_type GetChildMapHasLock(s3obj_type_map_t& childmap) REQUIRES(StatCacheNode::cache_lock);
// Find
virtual bool CheckETagValueHasLock(const char* petagval) const REQUIRES(StatCacheNode::cache_lock);
virtual std::shared_ptr<StatCacheNode> FindHasLock(const std::string& strpath, const char* petagval, bool& needTruncate) REQUIRES(StatCacheNode::cache_lock);
// Cache out
bool IsExpireStatCacheTimeHasLock() const REQUIRES(StatCacheNode::cache_lock);
virtual bool IsExpiredHasLock() REQUIRES(StatCacheNode::cache_lock);
virtual bool TruncateCacheHasLock() REQUIRES(StatCacheNode::cache_lock);
// For debug
void DumpElementHasLock(const std::string& indent, std::ostringstream& oss) const REQUIRES(StatCacheNode::cache_lock);
virtual void DumpHasLock(const std::string& indent, bool detail, std::ostringstream& oss) REQUIRES(StatCacheNode::cache_lock);
public:
// Properties
static unsigned long GetCacheCount(objtype_t type = objtype_t::UNKNOWN);
static time_t GetExpireTime();
static time_t SetExpireTime(time_t expire, bool is_interval = false);
static time_t UnsetExpireTime();
static bool IsEnableExpireTime();
static bool EnableNegativeCache() { return SetNegativeCache(true); }
static bool DisableNegativeCache() { return SetNegativeCache(false); }
static bool IsEnabledNegativeCache() { return UseNegativeCache; }
static bool PreventExpireCheck();
static bool ResumeExpireCheck();
// Constructor/Destructor
explicit StatCacheNode(const char* path = nullptr, objtype_t type = objtype_t::UNKNOWN);
virtual ~StatCacheNode();
StatCacheNode(const StatCacheNode&) = delete;
StatCacheNode(StatCacheNode&&) = delete;
StatCacheNode& operator=(const StatCacheNode&) = delete;
StatCacheNode& operator=(StatCacheNode&&) = delete;
// Cache Type
bool isSameObjectType(objtype_t type);
bool isDirectory();
bool isFile();
bool isSymlink();
bool isNegative();
// Clear
bool Clear();
bool ClearData();
bool RemoveChild(const std::string& strpath);
// Add
bool Add(const std::string& strpath, const struct stat* pstat, const headers_t* pmeta, objtype_t type, bool is_notruncate = false);
bool AddExtra(const std::string& value);
// Update(Set)
bool Update(const struct stat& stbuf, const headers_t& meta);
bool Update(const struct stat& stbuf, bool clear_meta);
bool Update(bool is_notruncate);
bool Update(const std::string& extvalue);
bool Set(const struct stat& stbuf, const headers_t& meta, bool is_notruncate);
// Get
std::string Get();
bool Get(headers_t* pmeta, struct stat* pstbuf);
bool Get(headers_t& get_meta, struct stat& st);
bool Get(headers_t& get_meta);
bool Get(struct stat& st);
objtype_t GetType() const;
struct timespec GetDate() const;
unsigned long GetHitCount() const;
unsigned long IncrementHitCount();
bool GetExtra(std::string& value);
s3obj_type_map_t::size_type GetChildMap(s3obj_type_map_t& childmap);
// Find
std::shared_ptr<StatCacheNode> Find(const std::string& strpath, const char* petagval = nullptr);
// Cache out
bool IsExpired();
void ClearNoTruncate();
bool TruncateCache();
// For debug
void Dump(bool detail);
};
typedef std::map<std::string, std::shared_ptr<StatCacheNode>> statcache_map_t;
//-------------------------------------------------------------------
// Derived Class : FileStatCache
//-------------------------------------------------------------------
class FileStatCache : public StatCacheNode
{
public:
explicit FileStatCache(const char* path = nullptr);
~FileStatCache() override;
FileStatCache(const FileStatCache&) = delete;
FileStatCache(FileStatCache&&) = delete;
FileStatCache& operator=(const FileStatCache&) = delete;
FileStatCache& operator=(FileStatCache&&) = delete;
};
//-------------------------------------------------------------------
// Derived Class : DirStatCache
//-------------------------------------------------------------------
// [NOTE]
// The fullpath of a DirStatCache always ends with a slash ('/').
// The keys of the 'children' map managed by this object are the partial
// path names of the child objects(files, directories, etc).
// For sub-directory objects, the partial path names do not include a
// slash.
//
class DirStatCache : public StatCacheNode
{
private:
std::mutex dir_cache_lock; // for local variables
struct timespec last_check_date GUARDED_BY(dir_cache_lock) = {0, 0};
objtype_t dir_cache_type GUARDED_BY(dir_cache_lock) = objtype_t::UNKNOWN; // [NOTE] backup for use in destructors only
statcache_map_t children GUARDED_BY(dir_cache_lock);
protected:
bool ClearHasLock() override REQUIRES(StatCacheNode::cache_lock);
bool RemoveChildHasLock(const std::string& strpath) override REQUIRES(StatCacheNode::cache_lock);
bool isRemovableHasLock() override REQUIRES(StatCacheNode::cache_lock);
bool HasExistedChildHasLock() REQUIRES(StatCacheNode::cache_lock, dir_cache_lock);
bool AddHasLock(const std::string& strpath, const struct stat* pstat, const headers_t* pmeta, objtype_t type, bool is_notruncate) override REQUIRES(StatCacheNode::cache_lock);
s3obj_type_map_t::size_type GetChildMapHasLock(s3obj_type_map_t& childmap) override REQUIRES(StatCacheNode::cache_lock);
std::shared_ptr<StatCacheNode> FindHasLock(const std::string& strpath, const char* petagval, bool& needTruncate) override REQUIRES(StatCacheNode::cache_lock);
bool NeedTruncateProcessing();
bool IsExpiredHasLock() override REQUIRES(StatCacheNode::cache_lock);
bool TruncateCacheHasLock() override REQUIRES(StatCacheNode::cache_lock);
bool GetChildLeafNameHasLock(const std::string& strpath, std::string& strLeafName, bool& hasNestedChildren) REQUIRES(StatCacheNode::cache_lock);
void DumpHasLock(const std::string& indent, bool detail, std::ostringstream& oss) override REQUIRES(StatCacheNode::cache_lock);
public:
explicit DirStatCache(const char* path = nullptr, objtype_t type = objtype_t::DIR_NORMAL);
~DirStatCache() override;
DirStatCache(const DirStatCache&) = delete;
DirStatCache(DirStatCache&&) = delete;
DirStatCache& operator=(const DirStatCache&) = delete;
DirStatCache& operator=(DirStatCache&&) = delete;
};
//-------------------------------------------------------------------
// Derived Class : SymlinkStatCache
//-------------------------------------------------------------------
class SymlinkStatCache : public StatCacheNode
{
private:
std::string link_path;
protected:
bool ClearHasLock() override REQUIRES(StatCacheNode::cache_lock);
public:
explicit SymlinkStatCache(const char* path = nullptr);
~SymlinkStatCache() override;
SymlinkStatCache(const SymlinkStatCache&) = delete;
SymlinkStatCache(SymlinkStatCache&&) = delete;
SymlinkStatCache& operator=(const SymlinkStatCache&) = delete;
SymlinkStatCache& operator=(SymlinkStatCache&&) = delete;
};
//-------------------------------------------------------------------
// Derived Class : NegativeStatCache
//-------------------------------------------------------------------
class NegativeStatCache : public StatCacheNode
{
protected:
bool CheckETagValueHasLock(const char* petagval) const override REQUIRES(StatCacheNode::cache_lock);
bool IsExpiredHasLock() override REQUIRES(StatCacheNode::cache_lock);
public:
explicit NegativeStatCache(const char* path = nullptr);
~NegativeStatCache() override;
NegativeStatCache(const NegativeStatCache&) = delete;
NegativeStatCache(NegativeStatCache&&) = delete;
NegativeStatCache& operator=(const NegativeStatCache&) = delete;
NegativeStatCache& operator=(NegativeStatCache&&) = delete;
};
//-------------------------------------------------------------------
// Utility Class : PreventStatCacheExpire
//-------------------------------------------------------------------
class PreventStatCacheExpire
{
public:
explicit PreventStatCacheExpire()
{
StatCacheNode::PreventExpireCheck();
}
~PreventStatCacheExpire()
{
StatCacheNode::ResumeExpireCheck();
}
PreventStatCacheExpire(const PreventStatCacheExpire&) = delete;
PreventStatCacheExpire(PreventStatCacheExpire&&) = delete;
PreventStatCacheExpire& operator=(const PreventStatCacheExpire&) = delete;
PreventStatCacheExpire& operator=(PreventStatCacheExpire&&) = delete;
};
#endif // S3FS_CACHE_NODE_H_
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/

src/common.h

@ -21,10 +21,11 @@
#ifndef S3FS_COMMON_H_
#define S3FS_COMMON_H_
#include <atomic>
#include <string>
#include <sys/types.h>
#include "../config.h"
#include "types.h"
//-------------------------------------------------------------------
// Global variables
@ -42,15 +43,56 @@ extern std::string program_name;
extern std::string service_path;
extern std::string s3host;
extern std::string mount_prefix;
extern std::string endpoint;
extern std::string region;
extern std::string cipher_suites;
extern std::string instance_name;
extern std::atomic<long long unsigned> num_requests_head_object;
extern std::atomic<long long unsigned> num_requests_put_object;
extern std::atomic<long long unsigned> num_requests_get_object;
extern std::atomic<long long unsigned> num_requests_delete_object;
extern std::atomic<long long unsigned> num_requests_list_bucket;
extern std::atomic<long long unsigned> num_requests_mpu_initiate;
extern std::atomic<long long unsigned> num_requests_mpu_complete;
extern std::atomic<long long unsigned> num_requests_mpu_abort;
extern std::atomic<long long unsigned> num_requests_mpu_upload_part;
extern std::atomic<long long unsigned> num_requests_mpu_copy_part;
//-------------------------------------------------------------------
// For weak attribute
//-------------------------------------------------------------------
#define S3FS_FUNCATTR_WEAK __attribute__ ((weak,unused))
//-------------------------------------------------------------------
// For clang -Wthread-safety
//-------------------------------------------------------------------
#ifdef __clang__
#define THREAD_ANNOTATION_ATTRIBUTE(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE(x) // no-op
#endif
#define GUARDED_BY(x) \
THREAD_ANNOTATION_ATTRIBUTE(guarded_by(x))
#define PT_GUARDED_BY(x) \
THREAD_ANNOTATION_ATTRIBUTE(pt_guarded_by(x))
#define REQUIRES(...) \
THREAD_ANNOTATION_ATTRIBUTE(requires_capability(__VA_ARGS__))
#define RETURN_CAPABILITY(...) \
THREAD_ANNOTATION_ATTRIBUTE(lock_returned(__VA_ARGS__))
#define ACQUIRED_BEFORE(...) \
THREAD_ANNOTATION_ATTRIBUTE(acquired_before(__VA_ARGS__))
#define ACQUIRED_AFTER(...) \
THREAD_ANNOTATION_ATTRIBUTE(acquired_after(__VA_ARGS__))
#define NO_THREAD_SAFETY_ANALYSIS \
THREAD_ANNOTATION_ATTRIBUTE(no_thread_safety_analysis)
#endif // S3FS_COMMON_H_
/*

File diff suppressed because it is too large.

src/curl.h

@ -21,14 +21,21 @@
#ifndef S3FS_CURL_H_
#define S3FS_CURL_H_
#include <atomic>
#include <cstdint>
#include <curl/curl.h>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
#include "autolock.h"
#include "metaheader.h"
#include "common.h"
#include "fdcache_page.h"
#include "metaheader.h"
#include "s3fs_util.h"
#include "types.h"
//----------------------------------------------
// Avoid dependency on libcurl version
@ -66,17 +73,18 @@
//----------------------------------------------
// Structure / Typedefs
//----------------------------------------------
typedef std::pair<double, double> progress_t;
typedef std::map<CURL*, time_t> curltime_t;
typedef std::map<CURL*, progress_t> curlprogress_t;
struct curlprogress {
time_t time;
double dl_progress;
double ul_progress;
};
typedef std::unique_ptr<CURL, decltype(&curl_easy_cleanup)> CurlUniquePtr;
//----------------------------------------------
// class S3fsCurl
//----------------------------------------------
class CurlHandlerPool;
class S3fsCred;
class S3fsCurl;
class Semaphore;
// Prototype function for lazy setup options for curl handle
typedef bool (*s3fscurl_lazy_setup)(S3fsCurl* s3fscurl);
@ -88,12 +96,10 @@ typedef std::vector<sseckeymap_t> sseckeylist_t;
//
class S3fsCurl
{
friend class S3fsMultiCurl;
private:
enum class REQTYPE {
enum class REQTYPE : int8_t {
UNSET = -1,
DELETE = 0,
DELETE,
HEAD,
PUTHEAD,
PUT,
@ -110,21 +116,18 @@ class S3fsCurl
IAMROLE
};
// Environment name
static constexpr char S3FS_SSL_PRIVKEY_PASSWORD[] = "S3FS_SSL_PRIVKEY_PASSWORD";
// class variables
static pthread_mutex_t curl_warnings_lock;
static bool curl_warnings_once; // emit older curl warnings only once
static pthread_mutex_t curl_handles_lock;
static std::atomic<bool> curl_warnings_once; // emit older curl warnings only once
static std::mutex curl_handles_lock;
static struct callback_locks_t {
pthread_mutex_t dns;
pthread_mutex_t ssl_session;
std::mutex dns;
std::mutex ssl_session;
} callback_locks;
static bool is_initglobal_done;
static CurlHandlerPool* sCurlPool;
static int sCurlPoolSize;
static CURLSH* hCurlShare;
static bool is_cert_check;
static bool is_dns_cache;
static bool is_ssl_session_cache;
static long connect_timeout;
static time_t readwrite_timeout;
static int retries;
@ -139,12 +142,15 @@ class S3fsCurl
static bool is_dump_body;
static S3fsCred* ps3fscred;
static long ssl_verify_hostname;
static curltime_t curl_times;
static curlprogress_t curl_progress;
static std::string client_cert;
static std::string client_cert_type;
static std::string client_priv_key;
static std::string client_priv_key_type;
static std::string client_key_password;
static std::map<const CURL*, curlprogress> curl_progress;
static std::string curl_ca_bundle;
static mimes_t mimeTypes;
static std::string userAgent;
static int max_parallel_cnt;
static int max_multireq;
static off_t multipart_size;
static off_t multipart_copy_size;
@ -156,13 +162,12 @@ class S3fsCurl
static std::string proxy_url;
static bool proxy_http;
static std::string proxy_userpwd; // load from file(<username>:<passphrase>)
static long ipresolve_type; // this value is a libcurl symbol.
// variables
CURL* hCurl;
CurlUniquePtr hCurl PT_GUARDED_BY(curl_handles_lock) = {nullptr, curl_easy_cleanup};
REQTYPE type; // type of request
std::string path; // target object path
std::string base_path; // base path (for multi curl head request)
std::string saved_path; // saved path = cache key (for multi curl head request)
std::string url; // target object path(url)
struct curl_slist* requestHeaders;
headers_t responseHeaders; // header data by HeaderCallback
@ -173,22 +178,14 @@ class S3fsCurl
off_t postdata_remaining; // use by post method and read callback function.
filepart partdata; // use by multipart upload/get object callback
bool is_use_ahbe; // additional header by extension
int retry_count; // retry count for multipart
FILE* b_infile; // backup for retrying
int retry_count; // retry count, this is used only sleep time before retrying
std::unique_ptr<FILE, decltype(&s3fs_fclose)> b_infile = {nullptr, &s3fs_fclose}; // backup for retrying
const unsigned char* b_postdata; // backup for retrying
off_t b_postdata_remaining; // backup for retrying
off_t b_partdata_startpos; // backup for retrying
off_t b_partdata_size; // backup for retrying
size_t b_ssekey_pos; // backup for retrying
std::string b_ssevalue; // backup for retrying
sse_type_t b_ssetype; // backup for retrying
std::string b_from; // backup for retrying(for copy request)
headers_t b_meta; // backup for retrying(for copy request)
std::string op; // the HTTP verb of the request ("PUT", "GET", etc.)
std::string query_string; // request query string
Semaphore *sem;
pthread_mutex_t *completed_tids_lock;
std::vector<pthread_t> *completed_tids;
s3fscurl_lazy_setup fpLazySetup; // curl options for lazy setting function
CURLcode curlCode; // handle curl return
@ -210,13 +207,10 @@ class S3fsCurl
// class methods
static bool InitGlobalCurl();
static bool DestroyGlobalCurl();
static bool InitShareCurl();
static bool DestroyShareCurl();
static void LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_access laccess, void* useptr);
static void UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* useptr);
static bool InitCryptMutex();
static bool DestroyCryptMutex();
static int CurlProgress(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow);
static std::string extractURI(const std::string& url);
static bool LocateBundle();
static size_t HeaderCallback(void *data, size_t blockSize, size_t numBlocks, void *userPtr);
@ -225,23 +219,16 @@ class S3fsCurl
static size_t UploadReadCallback(void *ptr, size_t size, size_t nmemb, void *userp);
static size_t DownloadWriteCallback(void* ptr, size_t size, size_t nmemb, void* userp);
static bool UploadMultipartPostCallback(S3fsCurl* s3fscurl, void* param);
static bool CopyMultipartPostCallback(S3fsCurl* s3fscurl, void* param);
static bool MixMultipartPostCallback(S3fsCurl* s3fscurl, void* param);
static std::unique_ptr<S3fsCurl> UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl);
static std::unique_ptr<S3fsCurl> CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl);
static std::unique_ptr<S3fsCurl> MixMultipartPostRetryCallback(S3fsCurl* s3fscurl);
static std::unique_ptr<S3fsCurl> ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl);
// lazy functions for set curl options
static bool CopyMultipartPostSetCurlOpts(S3fsCurl* s3fscurl);
static bool MultipartUploadPartSetCurlOpts(S3fsCurl* s3fscurl);
static bool CopyMultipartUploadSetCurlOpts(S3fsCurl* s3fscurl);
static bool PreGetObjectRequestSetCurlOpts(S3fsCurl* s3fscurl);
static bool PreHeadRequestSetCurlOpts(S3fsCurl* s3fscurl);
static bool LoadEnvSseCKeys();
static bool LoadEnvSseKmsid();
static bool PushbackSseKeys(const std::string& onekey);
static bool AddUserAgent(CURL* hCurl);
static bool AddUserAgent(const CurlUniquePtr& hCurl);
static int CurlDebugFunc(const CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
static int CurlDebugBodyInFunc(const CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
@ -249,21 +236,25 @@ class S3fsCurl
static int RawCurlDebugFunc(const CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr, curl_infotype datatype);
// methods
bool ResetHandle(AutoLock::Type locktype = AutoLock::NONE);
bool ResetHandle() REQUIRES(S3fsCurl::curl_handles_lock);
bool RemakeHandle();
bool ClearInternalData();
void insertV4Headers(const std::string& access_key_id, const std::string& secret_access_key, const std::string& access_token);
bool insertV4Headers(const std::string& access_key_id, const std::string& secret_access_key, const std::string& access_token);
void insertV2Headers(const std::string& access_key_id, const std::string& secret_access_key, const std::string& access_token);
void insertIBMIAMHeaders(const std::string& access_key_id, const std::string& access_token);
void insertAuthHeaders();
bool insertAuthHeaders();
bool AddSseRequestHead(sse_type_t ssetype, const std::string& ssevalue, bool is_copy);
bool PreHeadRequest(const char* tpath, size_t ssekey_pos = -1);
bool PreHeadRequest(const std::string& tpath, size_t ssekey_pos = -1) {
return PreHeadRequest(tpath.c_str(), ssekey_pos);
}
std::string CalcSignatureV2(const std::string& method, const std::string& strMD5, const std::string& content_type, const std::string& date, const std::string& resource, const std::string& secret_access_key, const std::string& access_token);
std::string CalcSignature(const std::string& method, const std::string& canonical_uri, const std::string& query_string, const std::string& strdate, const std::string& payload_hash, const std::string& date8601, const std::string& secret_access_key, const std::string& access_token);
int UploadMultipartPostSetup(const char* tpath, int part_num, const std::string& upload_id);
int CopyMultipartPostSetup(const char* from, const char* to, int part_num, const std::string& upload_id, headers_t& meta);
bool UploadMultipartPostComplete();
bool CopyMultipartPostComplete();
int MapPutErrorResponse(int result);
int MultipartUploadContentPartSetup(const char* tpath, int part_num, const std::string& upload_id);
int MultipartUploadCopyPartSetup(const char* from, const char* to, int part_num, const std::string& upload_id, const headers_t& meta);
bool MultipartUploadContentPartComplete();
bool MultipartUploadCopyPartComplete();
int MapPutErrorResponse(int result) const;
public:
// class methods
@ -271,23 +262,15 @@ class S3fsCurl
static bool InitCredentialObject(S3fsCred* pcredobj);
static bool InitMimeType(const std::string& strFile);
static bool DestroyS3fsCurl();
static std::unique_ptr<S3fsCurl> CreateParallelS3fsCurl(const char* tpath, int fd, off_t start, off_t size, int part_num, bool is_copy, etagpair* petag, const std::string& upload_id, int& result);
static int ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, int fd);
static int ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const fdpage_list_t& mixuppages);
static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, off_t size);
// lazy functions for set curl options(public)
static bool UploadMultipartPostSetCurlOpts(S3fsCurl* s3fscurl);
// class methods(variables)
static std::string LookupMimeType(const std::string& name);
static bool SetCheckCertificate(bool isCertCheck);
static bool SetDnsCache(bool isCache);
static bool SetSslSessionCache(bool isCache);
static long SetConnectTimeout(long timeout);
static time_t SetReadwriteTimeout(time_t timeout);
static time_t GetReadwriteTimeout() { return S3fsCurl::readwrite_timeout; }
static int SetRetries(int count);
static int GetRetries();
static bool SetPublicBucket(bool flag);
static bool IsPublicBucket() { return S3fsCurl::is_public_bucket; }
static acl_t SetDefaultAcl(acl_t acl);
@ -316,13 +299,8 @@ class S3fsCurl
static bool IsDumpBody() { return S3fsCurl::is_dump_body; }
static long SetSslVerifyHostname(long value);
static long GetSslVerifyHostname() { return S3fsCurl::ssl_verify_hostname; }
static bool SetSSLClientCertOptions(const std::string& values);
static void ResetOffset(S3fsCurl* pCurl);
// maximum parallel GET and PUT requests
static int SetMaxParallelCount(int value);
static int GetMaxParallelCount() { return S3fsCurl::max_parallel_cnt; }
// maximum parallel HEAD requests
static int SetMaxMultiRequest(int max);
static int GetMaxMultiRequest() { return S3fsCurl::max_multireq; }
static bool SetMultipartSize(off_t size);
static off_t GetMultipartSize() { return S3fsCurl::multipart_size; }
static bool SetMultipartCopySize(off_t size);
@ -340,10 +318,12 @@ class S3fsCurl
static bool IsRequesterPays() { return S3fsCurl::requester_pays; }
static bool SetProxy(const char* url);
static bool SetProxyUserPwd(const char* userpwd);
static bool SetIPResolveType(const char* value);
// methods
bool CreateCurlHandle(bool only_pool = false, bool remake = false);
bool DestroyCurlHandle(bool restore_pool = true, bool clear_internal_data = true, AutoLock::Type locktype = AutoLock::NONE);
bool CreateCurlHandle(bool remake = false);
bool DestroyCurlHandle(bool clear_internal_data = true);
bool DestroyCurlHandleHasLock(bool clear_internal_data = true) REQUIRES(S3fsCurl::curl_handles_lock);
bool GetIAMCredentials(const char* cred_url, const char* iam_v2_token, const char* ibm_secret_access_key, std::string& response);
bool GetIAMRoleFromMetaData(const char* cred_url, const char* iam_v2_token, std::string& token);
@ -351,47 +331,35 @@ class S3fsCurl
int RequestPerform(bool dontAddAuthHeaders=false);
int DeleteRequest(const char* tpath);
int GetIAMv2ApiToken(const char* token_url, int token_ttl, const char* token_ttl_hdr, std::string& response);
bool PreHeadRequest(const char* tpath, const char* bpath = nullptr, const char* savedpath = nullptr, size_t ssekey_pos = -1);
bool PreHeadRequest(const std::string& tpath, const std::string& bpath, const std::string& savedpath, size_t ssekey_pos = -1) {
return PreHeadRequest(tpath.c_str(), bpath.c_str(), savedpath.c_str(), ssekey_pos);
}
int HeadRequest(const char* tpath, headers_t& meta);
int PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy);
int PutHeadRequest(const char* tpath, const headers_t& meta, bool is_copy);
int PutRequest(const char* tpath, headers_t& meta, int fd);
int PreGetObjectRequest(const char* tpath, int fd, off_t start, off_t size, sse_type_t ssetype, const std::string& ssevalue);
int GetObjectRequest(const char* tpath, int fd, off_t start = -1, off_t size = -1);
int GetObjectRequest(const char* tpath, int fd, off_t start, off_t size, sse_type_t ssetype, const std::string& ssevalue);
int CheckBucket(const char* check_path, bool compat_dir, bool force_no_sse);
int ListBucketRequest(const char* tpath, const char* query);
int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy);
int CompleteMultipartPostRequest(const char* tpath, const std::string& upload_id, etaglist_t& parts);
int UploadMultipartPostRequest(const char* tpath, int part_num, const std::string& upload_id);
bool MixMultipartPostComplete();
int PreMultipartUploadRequest(const char* tpath, const headers_t& meta, std::string& upload_id);
int MultipartUploadPartSetup(const char* tpath, int upload_fd, off_t start, off_t size, int part_num, const std::string& upload_id, etagpair* petag, bool is_copy);
int MultipartUploadComplete(const char* tpath, const std::string& upload_id, const etaglist_t& parts);
bool MultipartUploadPartComplete();
int MultipartListRequest(std::string& body);
int AbortMultipartUpload(const char* tpath, const std::string& upload_id);
int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy);
int MultipartUploadRequest(const std::string& upload_id, const char* tpath, int fd, off_t offset, off_t size, etagpair* petagpair);
int MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size);
int MultipartPutHeadRequest(const std::string& from, const std::string& to, int part_number, const std::string& upload_id, const headers_t& meta);
int MultipartUploadPartRequest(const char* tpath, int upload_fd, off_t start, off_t size, int part_num, const std::string& upload_id, etagpair* petag, bool is_copy);
// methods(variables)
CURL* GetCurlHandle() const { return hCurl; }
std::string GetPath() const { return path; }
std::string GetBasePath() const { return base_path; }
std::string GetSpecialSavedPath() const { return saved_path; }
std::string GetUrl() const { return url; }
std::string GetOp() const { return op; }
const std::string& GetPath() const { return path; }
const std::string& GetUrl() const { return url; }
const std::string& GetOp() const { return op; }
const headers_t* GetResponseHeaders() const { return &responseHeaders; }
const std::string* GetBodyData() const { return &bodydata; }
const std::string* GetHeadData() const { return &headdata; }
const std::string& GetBodyData() const { return bodydata; }
const std::string& GetHeadData() const { return headdata; }
CURLcode GetCurlCode() const { return curlCode; }
long GetLastResponseCode() const { return LastResponseCode; }
bool SetUseAhbe(bool ahbe);
bool EnableUseAhbe() { return SetUseAhbe(true); }
bool DisableUseAhbe() { return SetUseAhbe(false); }
bool IsUseAhbe() const { return is_use_ahbe; }
int GetMultipartRetryCount() const { return retry_count; }
void SetMultipartRetryCount(int retrycnt) { retry_count = retrycnt; }
bool IsOverMultipartRetryCount() const { return (retry_count >= S3fsCurl::retries); }
size_t GetLastPreHeadSeecKeyPos() const { return b_ssekey_pos; }
};
#endif // S3FS_CURL_H_
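
The accessor changes above (GetPath, GetUrl, GetOp, GetBodyData and GetHeadData now return const references instead of by-value copies or raw pointers) follow a common C++ pattern. A minimal standalone sketch of that pattern, not s3fs code; ResponseHolder is a hypothetical type used only for illustration:

#include <string>
#include <utility>

// Returning a const reference avoids copying the string on every call and,
// unlike the old pointer-returning form, can never hand back a null pointer.
class ResponseHolder
{
    private:
        std::string bodydata;

    public:
        explicit ResponseHolder(std::string body) : bodydata(std::move(body)) {}

        // was: const std::string* GetBodyData() const { return &bodydata; }
        const std::string& GetBodyData() const { return bodydata; }
};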

View File

@ -1,137 +0,0 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <cstdio>
#include "s3fs_logger.h"
#include "curl_handlerpool.h"
#include "autolock.h"
//-------------------------------------------------------------------
// Class CurlHandlerPool
//-------------------------------------------------------------------
bool CurlHandlerPool::Init()
{
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
#if S3FS_PTHREAD_ERRORCHECK
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
if (0 != pthread_mutex_init(&mLock, &attr)) {
S3FS_PRN_ERR("Init curl handlers lock failed");
return false;
}
for(int cnt = 0; cnt < mMaxHandlers; ++cnt){
CURL* hCurl = curl_easy_init();
if(!hCurl){
S3FS_PRN_ERR("Init curl handlers pool failed");
Destroy();
return false;
}
mPool.push_back(hCurl);
}
return true;
}
bool CurlHandlerPool::Destroy()
{
{
AutoLock lock(&mLock);
while(!mPool.empty()){
CURL* hCurl = mPool.back();
mPool.pop_back();
if(hCurl){
curl_easy_cleanup(hCurl);
}
}
}
if (0 != pthread_mutex_destroy(&mLock)) {
S3FS_PRN_ERR("Destroy curl handlers lock failed");
return false;
}
return true;
}
CURL* CurlHandlerPool::GetHandler(bool only_pool)
{
AutoLock lock(&mLock);
CURL* hCurl = nullptr;
if(!mPool.empty()){
hCurl = mPool.back();
mPool.pop_back();
S3FS_PRN_DBG("Get handler from pool: rest = %d", static_cast<int>(mPool.size()));
}
if(only_pool){
return hCurl;
}
if(!hCurl){
S3FS_PRN_INFO("Pool empty: force to create new handler");
hCurl = curl_easy_init();
}
return hCurl;
}
void CurlHandlerPool::ReturnHandler(CURL* hCurl, bool restore_pool)
{
if(!hCurl){
return;
}
AutoLock lock(&mLock);
if(restore_pool){
S3FS_PRN_DBG("Return handler to pool");
mPool.push_back(hCurl);
while(mMaxHandlers < static_cast<int>(mPool.size())){
CURL* hOldCurl = mPool.front();
mPool.pop_front();
if(hOldCurl){
S3FS_PRN_INFO("Pool full: destroy the oldest handler");
curl_easy_cleanup(hOldCurl);
}
}
}else{
S3FS_PRN_INFO("Pool full: destroy the handler");
curl_easy_cleanup(hCurl);
}
}
void CurlHandlerPool::ResetHandler(CURL* hCurl)
{
if(!hCurl){
return;
}
AutoLock lock(&mLock);
curl_easy_reset(hCurl);
}
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/

View File

@ -1,394 +0,0 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <cstdio>
#include <cstdlib>
#include <cerrno>
#include <vector>
#include "s3fs.h"
#include "s3fs_logger.h"
#include "curl_multi.h"
#include "curl.h"
#include "autolock.h"
#include "psemaphore.h"
//-------------------------------------------------------------------
// Class S3fsMultiCurl
//-------------------------------------------------------------------
S3fsMultiCurl::S3fsMultiCurl(int maxParallelism, bool not_abort) : maxParallelism(maxParallelism), not_abort(not_abort), SuccessCallback(nullptr), NotFoundCallback(nullptr), RetryCallback(nullptr), pSuccessCallbackParam(nullptr), pNotFoundCallbackParam(nullptr)
{
int result;
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
#if S3FS_PTHREAD_ERRORCHECK
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
if (0 != (result = pthread_mutex_init(&completed_tids_lock, &attr))) {
S3FS_PRN_ERR("could not initialize completed_tids_lock: %i", result);
abort();
}
}
S3fsMultiCurl::~S3fsMultiCurl()
{
Clear();
int result;
if(0 != (result = pthread_mutex_destroy(&completed_tids_lock))){
S3FS_PRN_ERR("could not destroy completed_tids_lock: %i", result);
}
}
bool S3fsMultiCurl::ClearEx(bool is_all)
{
s3fscurllist_t::iterator iter;
for(iter = clist_req.begin(); iter != clist_req.end(); ++iter){
S3fsCurl* s3fscurl = iter->get();
if(s3fscurl){
s3fscurl->DestroyCurlHandle();
}
}
clist_req.clear();
if(is_all){
for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){
S3fsCurl* s3fscurl = iter->get();
s3fscurl->DestroyCurlHandle();
}
clist_all.clear();
}
S3FS_MALLOCTRIM(0);
return true;
}
S3fsMultiSuccessCallback S3fsMultiCurl::SetSuccessCallback(S3fsMultiSuccessCallback function)
{
S3fsMultiSuccessCallback old = SuccessCallback;
SuccessCallback = function;
return old;
}
S3fsMultiNotFoundCallback S3fsMultiCurl::SetNotFoundCallback(S3fsMultiNotFoundCallback function)
{
S3fsMultiNotFoundCallback old = NotFoundCallback;
NotFoundCallback = function;
return old;
}
S3fsMultiRetryCallback S3fsMultiCurl::SetRetryCallback(S3fsMultiRetryCallback function)
{
S3fsMultiRetryCallback old = RetryCallback;
RetryCallback = function;
return old;
}
void* S3fsMultiCurl::SetSuccessCallbackParam(void* param)
{
void* old = pSuccessCallbackParam;
pSuccessCallbackParam = param;
return old;
}
void* S3fsMultiCurl::SetNotFoundCallbackParam(void* param)
{
void* old = pNotFoundCallbackParam;
pNotFoundCallbackParam = param;
return old;
}
bool S3fsMultiCurl::SetS3fsCurlObject(std::unique_ptr<S3fsCurl> s3fscurl)
{
if(!s3fscurl){
return false;
}
clist_all.push_back(std::move(s3fscurl));
return true;
}
int S3fsMultiCurl::MultiPerform()
{
std::vector<pthread_t> threads;
bool success = true;
bool isMultiHead = false;
Semaphore sem(GetMaxParallelism());
int rc;
for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ++iter) {
pthread_t thread;
S3fsCurl* s3fscurl = iter->get();
if(!s3fscurl){
continue;
}
sem.wait();
{
AutoLock lock(&completed_tids_lock);
for(std::vector<pthread_t>::iterator it = completed_tids.begin(); it != completed_tids.end(); ++it){
void* retval;
rc = pthread_join(*it, &retval);
if (rc) {
success = false;
S3FS_PRN_ERR("failed pthread_join - rc(%d) %s", rc, strerror(rc));
} else {
long int_retval = reinterpret_cast<long>(retval);
if (int_retval && !(int_retval == -ENOENT && isMultiHead)) {
S3FS_PRN_WARN("thread terminated with non-zero return code: %ld", int_retval);
}
}
}
completed_tids.clear();
}
s3fscurl->sem = &sem;
s3fscurl->completed_tids_lock = &completed_tids_lock;
s3fscurl->completed_tids = &completed_tids;
isMultiHead |= s3fscurl->GetOp() == "HEAD";
rc = pthread_create(&thread, nullptr, S3fsMultiCurl::RequestPerformWrapper, static_cast<void*>(s3fscurl));
if (rc != 0) {
success = false;
S3FS_PRN_ERR("failed pthread_create - rc(%d)", rc);
break;
}
threads.push_back(thread);
}
for(int i = 0; i < sem.get_value(); ++i){
sem.wait();
}
AutoLock lock(&completed_tids_lock);
for (std::vector<pthread_t>::iterator titer = completed_tids.begin(); titer != completed_tids.end(); ++titer) {
void* retval;
rc = pthread_join(*titer, &retval);
if (rc) {
success = false;
S3FS_PRN_ERR("failed pthread_join - rc(%d)", rc);
} else {
long int_retval = reinterpret_cast<long>(retval);
if (int_retval && !(int_retval == -ENOENT && isMultiHead)) {
S3FS_PRN_WARN("thread terminated with non-zero return code: %ld", int_retval);
}
}
}
completed_tids.clear();
return success ? 0 : -EIO;
}
int S3fsMultiCurl::MultiRead()
{
int result = 0;
for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ){
std::unique_ptr<S3fsCurl> s3fscurl(std::move(*iter));
bool isRetry = false;
bool isPostpone = false;
bool isNeedResetOffset = true;
long responseCode = S3fsCurl::S3FSCURL_RESPONSECODE_NOTSET;
CURLcode curlCode = s3fscurl->GetCurlCode();
if(s3fscurl->GetResponseCode(responseCode, false) && curlCode == CURLE_OK){
if(S3fsCurl::S3FSCURL_RESPONSECODE_NOTSET == responseCode){
// This is a case where the processing result has not yet been updated (should be very rare).
isPostpone = true;
}else if(400 > responseCode){
// add into stat cache
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownPointerToBool
if(SuccessCallback && !SuccessCallback(s3fscurl.get(), pSuccessCallbackParam)){
S3FS_PRN_WARN("error from success callback function(%s).", s3fscurl->url.c_str());
}
}else if(400 == responseCode){
// can occur during multipart requests, so retry
S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
isRetry = true;
}else if(404 == responseCode){
// not found
// HEAD requests on readdir_multi_head can return 404
if(s3fscurl->GetOp() != "HEAD"){
S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
}
// Call callback function
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownPointerToBool
if(NotFoundCallback && !NotFoundCallback(s3fscurl.get(), pNotFoundCallbackParam)){
S3FS_PRN_WARN("error from not found callback function(%s).", s3fscurl->url.c_str());
}
}else if(500 == responseCode){
// Retry on 500 responses (11/13/2013), because s3fs was observed to get
// 500 errors from S3 that succeeded when the request was retried.
S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
isRetry = true;
}else{
// Retry in all other cases.
S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
isRetry = true;
}
}else{
S3FS_PRN_ERR("failed a request(Unknown response code: %s)", s3fscurl->url.c_str());
// Reuse partial file
switch(curlCode){
case CURLE_OPERATION_TIMEDOUT:
isRetry = true;
isNeedResetOffset = false;
break;
case CURLE_PARTIAL_FILE:
isRetry = true;
isNeedResetOffset = false;
break;
default:
S3FS_PRN_ERR("###curlCode: %d msg: %s", curlCode, curl_easy_strerror(curlCode));
isRetry = true;
break;
}
}
if(isPostpone){
clist_req.erase(iter);
clist_req.push_back(std::move(s3fscurl)); // Re-evaluate at the end
iter = clist_req.begin();
}else{
if(!isRetry || (!not_abort && 0 != result)){
// If an EIO error has already occurred, it will be terminated
// immediately even if retry processing is required.
s3fscurl->DestroyCurlHandle();
}else{
// Reset offset
if(isNeedResetOffset){
S3fsCurl::ResetOffset(s3fscurl.get());
}
// For retry
std::unique_ptr<S3fsCurl> retrycurl;
const S3fsCurl* retrycurl_ptr = retrycurl.get(); // save this due to std::move below
if(RetryCallback){
retrycurl = RetryCallback(s3fscurl.get());
if(nullptr != retrycurl){
clist_all.push_back(std::move(retrycurl));
}else{
// set EIO and wait for other parts.
result = -EIO;
}
}
// cppcheck-suppress mismatchingContainers
if(s3fscurl.get() != retrycurl_ptr){
s3fscurl->DestroyCurlHandle();
}
}
iter = clist_req.erase(iter);
}
}
clist_req.clear();
if(!not_abort && 0 != result){
// If an EIO error has already occurred, clear all retry objects.
for(s3fscurllist_t::iterator iter = clist_all.begin(); iter != clist_all.end(); ++iter){
S3fsCurl* s3fscurl = iter->get();
s3fscurl->DestroyCurlHandle();
}
clist_all.clear();
}
return result;
}
int S3fsMultiCurl::Request()
{
S3FS_PRN_INFO3("[count=%zu]", clist_all.size());
// Make request list.
//
// Send multi request loop (with retry).
// (When many requests are sent, we sometimes get "Couldn't connect to server".)
//
while(!clist_all.empty()){
// set curl handle to multi handle
int result;
s3fscurllist_t::iterator iter;
for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){
clist_req.push_back(std::move(*iter));
}
clist_all.clear();
// Send multi request.
if(0 != (result = MultiPerform())){
Clear();
return result;
}
// Read the result
if(0 != (result = MultiRead())){
Clear();
return result;
}
// Cleanup curl handle in multi handle
ClearEx(false);
}
return 0;
}
//
// thread function for performing an S3fsCurl request
//
void* S3fsMultiCurl::RequestPerformWrapper(void* arg)
{
S3fsCurl* s3fscurl= static_cast<S3fsCurl*>(arg);
void* result = nullptr;
if(!s3fscurl){
return reinterpret_cast<void*>(static_cast<intptr_t>(-EIO));
}
if(s3fscurl->fpLazySetup){
if(!s3fscurl->fpLazySetup(s3fscurl)){
S3FS_PRN_ERR("Failed to lazy setup, then respond EIO.");
result = reinterpret_cast<void*>(static_cast<intptr_t>(-EIO));
}
}
if(!result){
result = reinterpret_cast<void*>(static_cast<intptr_t>(s3fscurl->RequestPerform()));
s3fscurl->DestroyCurlHandle(true, false);
}
AutoLock lock(s3fscurl->completed_tids_lock);
s3fscurl->completed_tids->push_back(pthread_self());
s3fscurl->sem->post();
return result;
}
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/

View File

@ -1,90 +0,0 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_CURL_MULTI_H_
#define S3FS_CURL_MULTI_H_
#include <memory>
#include <vector>
//----------------------------------------------
// Typedef
//----------------------------------------------
class S3fsCurl;
typedef std::vector<std::unique_ptr<S3fsCurl>> s3fscurllist_t;
typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl, void* param); // callback for a successful request in a multi request
typedef bool (*S3fsMultiNotFoundCallback)(S3fsCurl* s3fscurl, void* param); // callback for a request that returns not found in a multi request
typedef std::unique_ptr<S3fsCurl> (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying
//----------------------------------------------
// class S3fsMultiCurl
//----------------------------------------------
class S3fsMultiCurl
{
private:
const int maxParallelism;
s3fscurllist_t clist_all; // all of curl requests
s3fscurllist_t clist_req; // curl requests are sent
bool not_abort; // complete all requests without aborting on errors
S3fsMultiSuccessCallback SuccessCallback;
S3fsMultiNotFoundCallback NotFoundCallback;
S3fsMultiRetryCallback RetryCallback;
void* pSuccessCallbackParam;
void* pNotFoundCallbackParam;
pthread_mutex_t completed_tids_lock;
std::vector<pthread_t> completed_tids;
private:
bool ClearEx(bool is_all);
int MultiPerform();
int MultiRead();
static void* RequestPerformWrapper(void* arg);
public:
explicit S3fsMultiCurl(int maxParallelism, bool not_abort = false);
~S3fsMultiCurl();
int GetMaxParallelism() const { return maxParallelism; }
S3fsMultiSuccessCallback SetSuccessCallback(S3fsMultiSuccessCallback function);
S3fsMultiNotFoundCallback SetNotFoundCallback(S3fsMultiNotFoundCallback function);
S3fsMultiRetryCallback SetRetryCallback(S3fsMultiRetryCallback function);
void* SetSuccessCallbackParam(void* param);
void* SetNotFoundCallbackParam(void* param);
bool Clear() { return ClearEx(true); }
bool SetS3fsCurlObject(std::unique_ptr<S3fsCurl> s3fscurl);
int Request();
};
#endif // S3FS_CURL_MULTI_H_
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/

src/curl_share.cpp Normal file
View File

@ -0,0 +1,234 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "common.h"
#include "s3fs.h"
#include "s3fs_logger.h"
#include "curl_share.h"
//-------------------------------------------------------------------
// Class S3fsCurlShare
//-------------------------------------------------------------------
bool S3fsCurlShare::is_dns_cache = true; // default
bool S3fsCurlShare::is_ssl_cache = true; // default
std::mutex S3fsCurlShare::curl_share_lock;
std::map<std::thread::id, CurlSharePtr> S3fsCurlShare::ShareHandles;
std::map<std::thread::id, ShareLocksPtr> S3fsCurlShare::ShareLocks;
//-------------------------------------------------------------------
// Class methods for S3fsCurlShare
//-------------------------------------------------------------------
bool S3fsCurlShare::SetDnsCache(bool isCache)
{
bool old = S3fsCurlShare::is_dns_cache;
S3fsCurlShare::is_dns_cache = isCache;
return old;
}
bool S3fsCurlShare::SetSslSessionCache(bool isCache)
{
bool old = S3fsCurlShare::is_ssl_cache;
S3fsCurlShare::is_ssl_cache = isCache;
return old;
}
void S3fsCurlShare::LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_access laccess, void* useptr)
{
auto* pLocks = static_cast<curl_share_locks*>(useptr);
if(CURL_LOCK_DATA_DNS == nLockData){
pLocks->lock_dns.lock();
}else if(CURL_LOCK_DATA_SSL_SESSION == nLockData){
pLocks->lock_session.lock();
}
}
void S3fsCurlShare::UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* useptr)
{
auto* pLocks = static_cast<curl_share_locks*>(useptr);
if(CURL_LOCK_DATA_DNS == nLockData){
pLocks->lock_dns.unlock();
}else if(CURL_LOCK_DATA_SSL_SESSION == nLockData){
pLocks->lock_session.unlock();
}
}
bool S3fsCurlShare::SetCurlShareHandle(CURL* hCurl)
{
if(!hCurl){
S3FS_PRN_ERR("Curl handle is null");
return false;
}
// get curl share handle
S3fsCurlShare CurlShareObj;
CURLSH* hCurlShare = CurlShareObj.GetCurlShareHandle();
if(!hCurlShare){
// case where CurlShare is not used
return true;
}
// set share handle to curl handle
if(CURLE_OK != curl_easy_setopt(hCurl, CURLOPT_SHARE, hCurlShare)){
S3FS_PRN_ERR("Failed to set Curl share handle to curl handle.");
return false;
}
return true;
}
bool S3fsCurlShare::DestroyCurlShareHandleForThread()
{
S3fsCurlShare CurlShareObj;
CurlShareObj.DestroyCurlShareHandle();
return true;
}
bool S3fsCurlShare::InitializeCurlShare(const CurlSharePtr& hShare, const ShareLocksPtr& ShareLock)
{
CURLSHcode nSHCode;
// set lock handlers
if(CURLSHE_OK != (nSHCode = curl_share_setopt(hShare.get(), CURLSHOPT_LOCKFUNC, S3fsCurlShare::LockCurlShare))){
S3FS_PRN_ERR("curl_share_setopt(LOCKFUNC) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
return false;
}
if(CURLSHE_OK != (nSHCode = curl_share_setopt(hShare.get(), CURLSHOPT_UNLOCKFUNC, S3fsCurlShare::UnlockCurlShare))){
S3FS_PRN_ERR("curl_share_setopt(UNLOCKFUNC) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
return false;
}
// set user data for lock functions
if(CURLSHE_OK != (nSHCode = curl_share_setopt(hShare.get(), CURLSHOPT_USERDATA, ShareLock.get()))){
S3FS_PRN_ERR("curl_share_setopt(USERDATA) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
return false;
}
// set share type
if(S3fsCurlShare::is_dns_cache){
nSHCode = curl_share_setopt(hShare.get(), CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS);
if(CURLSHE_OK != nSHCode && CURLSHE_BAD_OPTION != nSHCode && CURLSHE_NOT_BUILT_IN != nSHCode){
S3FS_PRN_ERR("curl_share_setopt(DNS) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
return false;
}else if(CURLSHE_BAD_OPTION == nSHCode || CURLSHE_NOT_BUILT_IN == nSHCode){
S3FS_PRN_WARN("curl_share_setopt(DNS) returns %d(%s), but continue without shared dns data.", nSHCode, curl_share_strerror(nSHCode));
}
}
if(S3fsCurlShare::is_ssl_cache){
nSHCode = curl_share_setopt(hShare.get(), CURLSHOPT_SHARE, CURL_LOCK_DATA_SSL_SESSION);
if(CURLSHE_OK != nSHCode && CURLSHE_BAD_OPTION != nSHCode && CURLSHE_NOT_BUILT_IN != nSHCode){
S3FS_PRN_ERR("curl_share_setopt(SSL SESSION) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
return false;
}else if(CURLSHE_BAD_OPTION == nSHCode || CURLSHE_NOT_BUILT_IN == nSHCode){
S3FS_PRN_WARN("curl_share_setopt(SSL SESSION) returns %d(%s), but continue without shared ssl session data.", nSHCode, curl_share_strerror(nSHCode));
}
}
return true;
}
//-------------------------------------------------------------------
// Methods for S3fsCurlShare
//-------------------------------------------------------------------
// [NOTE]
// Set the current thread id (std::thread::id) to ThreadId.
//
S3fsCurlShare::S3fsCurlShare() : ThreadId(std::this_thread::get_id())
{
}
void S3fsCurlShare::DestroyCurlShareHandle()
{
if(!S3fsCurlShare::is_dns_cache && !S3fsCurlShare::is_ssl_cache){
// No curl share handle exists
return;
}
const std::lock_guard<std::mutex> lock(S3fsCurlShare::curl_share_lock);
// find the existing handle and clean it up
auto handle_iter = S3fsCurlShare::ShareHandles.find(ThreadId);
if(handle_iter == S3fsCurlShare::ShareHandles.end()){
S3FS_PRN_WARN("Not found curl share handle");
}else{
S3fsCurlShare::ShareHandles.erase(handle_iter);
}
// find the locks and clean them up
auto locks_iter = S3fsCurlShare::ShareLocks.find(ThreadId);
if(locks_iter == S3fsCurlShare::ShareLocks.end()){
S3FS_PRN_WARN("Not found locks of curl share handle");
}else{
S3fsCurlShare::ShareLocks.erase(locks_iter);
}
}
CURLSH* S3fsCurlShare::GetCurlShareHandle()
{
if(!S3fsCurlShare::is_dns_cache && !S3fsCurlShare::is_ssl_cache){
// No curl share handle exists
return nullptr;
}
const std::lock_guard<std::mutex> lock(S3fsCurlShare::curl_share_lock);
// find the existing handle
auto handle_iter = S3fsCurlShare::ShareHandles.find(ThreadId);
if(handle_iter != S3fsCurlShare::ShareHandles.end()){
// Already created share handle for this thread.
return handle_iter->second.get();
}
// create new curl share handle and locks
CurlSharePtr hShare = {nullptr, curl_share_cleanup};
hShare.reset(curl_share_init());
if(!hShare){
S3FS_PRN_ERR("Failed to create curl share handle");
return nullptr;
}
auto pLocks = std::make_unique<curl_share_locks>();
// Initialize curl share handle
if(!S3fsCurlShare::InitializeCurlShare(hShare, pLocks)){
S3FS_PRN_ERR("Failed to initialize curl share handle");
return nullptr;
}
// set map
S3fsCurlShare::ShareHandles.emplace(ThreadId, std::move(hShare));
S3fsCurlShare::ShareLocks.emplace(ThreadId, std::move(pLocks));
// Look up the inserted handle again (to satisfy clang-tidy)
handle_iter = S3fsCurlShare::ShareHandles.find(ThreadId);
if(handle_iter == S3fsCurlShare::ShareHandles.end()){
S3FS_PRN_ERR("Failed to insert curl share to map.");
return nullptr;
}
return handle_iter->second.get();
}
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/
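
The new S3fsCurlShare class above keeps one CURLSH handle per thread and lets every curl easy handle created on that thread share DNS and SSL-session caches. The following is a minimal standalone sketch of the same libcurl share-interface wiring; it is not s3fs code, the names my_share_locks, lock_cb and unlock_cb are illustrative, and error checking is kept minimal for brevity:

#include <curl/curl.h>
#include <mutex>

// Locks protecting the shared DNS and SSL-session data, mirroring curl_share_locks.
struct my_share_locks {
    std::mutex dns;
    std::mutex ssl;
};

static void lock_cb(CURL*, curl_lock_data data, curl_lock_access, void* userptr)
{
    auto* locks = static_cast<my_share_locks*>(userptr);
    if(CURL_LOCK_DATA_DNS == data){
        locks->dns.lock();
    }else if(CURL_LOCK_DATA_SSL_SESSION == data){
        locks->ssl.lock();
    }
}

static void unlock_cb(CURL*, curl_lock_data data, void* userptr)
{
    auto* locks = static_cast<my_share_locks*>(userptr);
    if(CURL_LOCK_DATA_DNS == data){
        locks->dns.unlock();
    }else if(CURL_LOCK_DATA_SSL_SESSION == data){
        locks->ssl.unlock();
    }
}

int main()
{
    curl_global_init(CURL_GLOBAL_ALL);

    my_share_locks locks;
    CURLSH* share = curl_share_init();
    curl_share_setopt(share, CURLSHOPT_LOCKFUNC,   lock_cb);
    curl_share_setopt(share, CURLSHOPT_UNLOCKFUNC, unlock_cb);
    curl_share_setopt(share, CURLSHOPT_USERDATA,   &locks);
    curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS);
    curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_SSL_SESSION);

    // Every easy handle that should reuse the cached data gets the share handle.
    CURL* easy = curl_easy_init();
    curl_easy_setopt(easy, CURLOPT_SHARE, share);
    curl_easy_setopt(easy, CURLOPT_URL, "https://example.com/");
    curl_easy_perform(easy);

    curl_easy_cleanup(easy);
    curl_share_cleanup(share);
    curl_global_cleanup();
    return 0;
}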

src/curl_share.h Normal file
View File

@ -0,0 +1,89 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_CURL_SHARE_H_
#define S3FS_CURL_SHARE_H_
#include <curl/curl.h>
#include <map>
#include <memory>
#include <mutex>
#include <thread>
#include "common.h"
//----------------------------------------------
// Structure / Typedefs
//----------------------------------------------
struct curl_share_locks {
std::mutex lock_dns;
std::mutex lock_session;
};
typedef std::unique_ptr<CURLSH, decltype(&curl_share_cleanup)> CurlSharePtr;
typedef std::unique_ptr<curl_share_locks> ShareLocksPtr;
//----------------------------------------------
// class S3fsCurlShare
//----------------------------------------------
class S3fsCurlShare
{
private:
static bool is_dns_cache;
static bool is_ssl_cache;
static std::mutex curl_share_lock;
static std::map<std::thread::id, CurlSharePtr> ShareHandles GUARDED_BY(curl_share_lock);
static std::map<std::thread::id, ShareLocksPtr> ShareLocks GUARDED_BY(curl_share_lock);
std::thread::id ThreadId;
private:
static void LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_access laccess, void* useptr) NO_THREAD_SAFETY_ANALYSIS;
static void UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* useptr) NO_THREAD_SAFETY_ANALYSIS;
static bool InitializeCurlShare(const CurlSharePtr& hShare, const ShareLocksPtr& ShareLock) REQUIRES(curl_share_lock);
void DestroyCurlShareHandle();
CURLSH* GetCurlShareHandle();
public:
static bool SetDnsCache(bool isCache);
static bool SetSslSessionCache(bool isCache);
static bool SetCurlShareHandle(CURL* hCurl);
static bool DestroyCurlShareHandleForThread();
// constructor/destructor
explicit S3fsCurlShare();
~S3fsCurlShare() = default;
S3fsCurlShare(const S3fsCurlShare&) = delete;
S3fsCurlShare(S3fsCurlShare&&) = delete;
S3fsCurlShare& operator=(const S3fsCurlShare&) = delete;
S3fsCurlShare& operator=(S3fsCurlShare&&) = delete;
};
#endif // S3FS_CURL_SHARE_H_
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/
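
The GUARDED_BY, REQUIRES and NO_THREAD_SAFETY_ANALYSIS macros used in this header (and in curl.h above) are presumably wrappers around Clang's -Wthread-safety attributes defined in common.h. A minimal sketch of the pattern under that assumption; the Registry class and the macro expansions below are illustrative only, and how completely the analysis tracks std::mutex and std::lock_guard depends on whether the standard library itself carries capability annotations:

#include <map>
#include <mutex>
#include <string>

// Assumed expansions of the s3fs macros; the real definitions live in common.h.
#if defined(__clang__)
#define GUARDED_BY(x)   __attribute__((guarded_by(x)))
#define REQUIRES(x)     __attribute__((requires_capability(x)))
#else
#define GUARDED_BY(x)
#define REQUIRES(x)
#endif

class Registry
{
    private:
        static std::mutex                 reg_lock;
        static std::map<std::string, int> entries GUARDED_BY(reg_lock);

        // Callers must already hold reg_lock; the analysis flags call sites that do not.
        static int FindHasLock(const std::string& key) REQUIRES(reg_lock)
        {
            auto iter = entries.find(key);
            return entries.end() != iter ? iter->second : -1;
        }

    public:
        static int Find(const std::string& key)
        {
            const std::lock_guard<std::mutex> lock(reg_lock);
            return FindHasLock(key);
        }
};

std::mutex                 Registry::reg_lock;
std::map<std::string, int> Registry::entries;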

View File

@ -21,6 +21,7 @@
#include <cstdio>
#include <cstdlib>
#include <curl/curl.h>
#include <string>
#include "common.h"
#include "s3fs_logger.h"
@ -29,6 +30,8 @@
#include "s3fs_auth.h"
#include "s3fs_cred.h"
using namespace std::string_literals;
//-------------------------------------------------------------------
// Utility Functions
//-------------------------------------------------------------------
@ -46,7 +49,7 @@ struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* k
// key & value are trimmed; only the key is lower-cased
std::string strkey = trim(key);
std::string strval = value ? trim(value) : "";
std::string strnew = key + std::string(": ") + strval;
std::string strnew = key + ": "s + strval;
char* data;
if(nullptr == (data = strdup(strnew.c_str()))){
return list;
@ -207,7 +210,7 @@ bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::strin
std::string prepare_url(const char* url)
{
S3FS_PRN_INFO3("URL is %s", url);
S3FS_PRN_DBG("URL is %s", url);
std::string uri;
std::string hostname;
@ -240,7 +243,7 @@ std::string prepare_url(const char* url)
url_str = uri + hostname + path;
S3FS_PRN_INFO3("URL changed is %s", url_str.c_str());
S3FS_PRN_DBG("URL changed is %s", url_str.c_str());
return url_str;
}

View File

@ -21,9 +21,12 @@
#ifndef S3FS_CURL_UTIL_H_
#define S3FS_CURL_UTIL_H_
#include <cstdint>
#include <curl/curl.h>
#include <string>
#include "metaheader.h"
enum class sse_type_t;
enum class sse_type_t : uint8_t;
//----------------------------------------------
// Functions
@ -36,6 +39,7 @@ std::string get_header_value(const struct curl_slist* list, const std::string &k
bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url);
std::string prepare_url(const char* url);
bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& ssevalue); // implemented in s3fs.cpp
int put_headers(const char* path, const headers_t& meta, bool is_copy, bool use_st_size = true); // implemented in s3fs.cpp
bool make_md5_from_binary(const char* pstr, size_t length, std::string& md5);
std::string url_to_host(const std::string &url);

View File

@ -24,8 +24,11 @@
#include <climits>
#include <unistd.h>
#include <dirent.h>
#include <mutex>
#include <string>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <utility>
#include "fdcache.h"
#include "fdcache_stat.h"
@ -33,7 +36,6 @@
#include "s3fs_logger.h"
#include "s3fs_cred.h"
#include "string_util.h"
#include "autolock.h"
//
// The following symbols are used by FdManager::RawCheckAllCache().
@ -76,10 +78,10 @@ static constexpr char NOCACHE_PATH_PREFIX_FORM[] = " __S3FS_UNEXISTED_PATH_%lx__
// FdManager class variable
//------------------------------------------------
FdManager FdManager::singleton;
pthread_mutex_t FdManager::fd_manager_lock;
pthread_mutex_t FdManager::cache_cleanup_lock;
pthread_mutex_t FdManager::reserved_diskspace_lock;
bool FdManager::is_lock_init(false);
std::mutex FdManager::fd_manager_lock;
std::mutex FdManager::cache_cleanup_lock;
std::mutex FdManager::reserved_diskspace_lock;
std::mutex FdManager::except_entmap_lock;
std::string FdManager::cache_dir;
bool FdManager::check_cache_dir_exist(false);
off_t FdManager::free_disk_space = 0;
@ -105,7 +107,7 @@ bool FdManager::SetCacheDir(const char* dir)
bool FdManager::SetCacheCheckOutput(const char* path)
{
if(!path || '\0' == path[0]){
check_cache_output.erase();
check_cache_output.clear();
}else{
check_cache_output = path;
}
@ -235,18 +237,17 @@ bool FdManager::CheckCacheDirExist()
if(FdManager::cache_dir.empty()){
return true;
}
return IsDir(&cache_dir);
return IsDir(cache_dir);
}
off_t FdManager::GetEnsureFreeDiskSpace()
off_t FdManager::GetEnsureFreeDiskSpaceHasLock()
{
AutoLock auto_lock(&FdManager::reserved_diskspace_lock);
return FdManager::free_disk_space;
}
off_t FdManager::SetEnsureFreeDiskSpace(off_t size)
{
AutoLock auto_lock(&FdManager::reserved_diskspace_lock);
const std::lock_guard<std::mutex> lock(FdManager::reserved_diskspace_lock);
off_t old = FdManager::free_disk_space;
FdManager::free_disk_space = size;
return old;
@ -254,9 +255,11 @@ off_t FdManager::SetEnsureFreeDiskSpace(off_t size)
bool FdManager::InitFakeUsedDiskSize(off_t fake_freesize)
{
FdManager::fake_used_disk_space = 0; // At first, clear this value because this value is used in GetFreeDiskSpace.
const std::lock_guard<std::mutex> lock(FdManager::reserved_diskspace_lock);
off_t actual_freesize = FdManager::GetFreeDiskSpace(nullptr);
FdManager::fake_used_disk_space = 0; // Clear this value first because it is used in GetFreeDiskSpaceHasLock.
off_t actual_freesize = FdManager::GetFreeDiskSpaceHasLock(nullptr);
if(fake_freesize < actual_freesize){
FdManager::fake_used_disk_space = actual_freesize - fake_freesize;
@ -284,7 +287,7 @@ off_t FdManager::GetTotalDiskSpace(const char* path)
return actual_totalsize;
}
off_t FdManager::GetFreeDiskSpace(const char* path)
off_t FdManager::GetFreeDiskSpaceHasLock(const char* path)
{
struct statvfs vfsbuf;
int result = FdManager::GetVfsStat(path, &vfsbuf);
@ -321,22 +324,20 @@ int FdManager::GetVfsStat(const char* path, struct statvfs* vfsbuf){
return 0;
}
bool FdManager::IsSafeDiskSpace(const char* path, off_t size)
bool FdManager::IsSafeDiskSpace(const char* path, off_t size, bool withmsg)
{
off_t fsize = FdManager::GetFreeDiskSpace(path);
return size + FdManager::GetEnsureFreeDiskSpace() <= fsize;
}
const std::lock_guard<std::mutex> lock(FdManager::reserved_diskspace_lock);
bool FdManager::IsSafeDiskSpaceWithLog(const char* path, off_t size)
{
off_t fsize = FdManager::GetFreeDiskSpace(path);
off_t needsize = size + FdManager::GetEnsureFreeDiskSpace();
if(needsize <= fsize){
return true;
} else {
S3FS_PRN_EXIT("There is no enough disk space for used as cache(or temporary) directory by s3fs. Requires %.3f MB, already has %.3f MB.", static_cast<double>(needsize) / 1024 / 1024, static_cast<double>(fsize) / 1024 / 1024);
off_t fsize = FdManager::GetFreeDiskSpaceHasLock(path);
off_t needsize = size + FdManager::GetEnsureFreeDiskSpaceHasLock();
if(fsize < needsize){
if(withmsg){
S3FS_PRN_EXIT("There is not enough disk space for use as cache(or temporary) directory by s3fs. Requires %.3f MB, already has %.3f MB.", static_cast<double>(needsize) / 1024 / 1024, static_cast<double>(fsize) / 1024 / 1024);
}
return false;
}
return true;
}
bool FdManager::HaveLseekHole()
@ -347,7 +348,7 @@ bool FdManager::HaveLseekHole()
// create temporary file
int fd;
std::unique_ptr<FILE, decltype(&s3fs_fclose)> ptmpfp(MakeTempFile(), &s3fs_fclose);
auto ptmpfp = MakeTempFile();
if(nullptr == ptmpfp || -1 == (fd = fileno(ptmpfp.get()))){
S3FS_PRN_ERR("failed to open temporary file by errno(%d)", errno);
FdManager::checked_lseek = true;
@ -385,16 +386,16 @@ bool FdManager::SetTmpDir(const char *dir)
return true;
}
bool FdManager::IsDir(const std::string* dir)
bool FdManager::IsDir(const std::string& dir)
{
// check the directory
struct stat st;
if(0 != stat(dir->c_str(), &st)){
S3FS_PRN_ERR("could not stat() directory %s by errno(%d).", dir->c_str(), errno);
if(0 != stat(dir.c_str(), &st)){
S3FS_PRN_ERR("could not stat() directory %s by errno(%d).", dir.c_str(), errno);
return false;
}
if(!S_ISDIR(st.st_mode)){
S3FS_PRN_ERR("the directory %s is not a directory.", dir->c_str());
S3FS_PRN_ERR("the directory %s is not a directory.", dir.c_str());
return false;
}
return true;
@ -405,10 +406,10 @@ bool FdManager::CheckTmpDirExist()
if(FdManager::tmp_dir.empty()){
return true;
}
return IsDir(&tmp_dir);
return IsDir(tmp_dir);
}
FILE* FdManager::MakeTempFile() {
std::unique_ptr<FILE, decltype(&s3fs_fclose)> FdManager::MakeTempFile() {
int fd;
char cfn[PATH_MAX];
std::string fn = tmp_dir + "/s3fstmp.XXXXXX";
@ -418,22 +419,22 @@ FILE* FdManager::MakeTempFile() {
fd = mkstemp(cfn);
if (-1 == fd) {
S3FS_PRN_ERR("failed to create tmp file. errno(%d)", errno);
return nullptr;
return {nullptr, &s3fs_fclose};
}
if (-1 == unlink(cfn)) {
S3FS_PRN_ERR("failed to delete tmp file. errno(%d)", errno);
return nullptr;
return {nullptr, &s3fs_fclose};
}
return fdopen(fd, "rb+");
return {fdopen(fd, "rb+"), &s3fs_fclose};
}
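
MakeTempFile() above now returns a std::unique_ptr with s3fs_fclose as its deleter, combining the classic mkstemp()+unlink() anonymous temp file with RAII cleanup. A standalone sketch of the same pattern; close_file stands in for s3fs_fclose, and MakeAnonymousTempFile is an illustrative name, not an s3fs function:

#include <climits>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <string>
#include <unistd.h>

// fclose() that tolerates a null pointer, like the assumed behavior of s3fs_fclose.
static void close_file(FILE* fp)
{
    if(fp){
        fclose(fp);
    }
}

using FilePtr = std::unique_ptr<FILE, decltype(&close_file)>;

// mkstemp() creates the file, unlink() removes its name immediately so the data
// vanishes when the stream is closed, and the unique_ptr guarantees fclose()
// on every return path.
FilePtr MakeAnonymousTempFile(const std::string& tmp_dir)
{
    std::string templ = tmp_dir + "/exampletmp.XXXXXX";
    char cfn[PATH_MAX];
    strncpy(cfn, templ.c_str(), sizeof(cfn) - 1);
    cfn[sizeof(cfn) - 1] = '\0';

    int fd = mkstemp(cfn);
    if(-1 == fd){
        return {nullptr, &close_file};
    }
    unlink(cfn);                          // keep the open descriptor, drop the name
    return {fdopen(fd, "rb+"), &close_file};
}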
bool FdManager::HasOpenEntityFd(const char* path)
{
AutoLock auto_lock(&FdManager::fd_manager_lock);
const std::lock_guard<std::mutex> lock(FdManager::fd_manager_lock);
const FdEntity* ent;
int fd = -1;
if(nullptr == (ent = FdManager::singleton.GetFdEntity(path, fd, false, AutoLock::ALREADY_LOCKED))){
if(nullptr == (ent = FdManager::singleton.GetFdEntityHasLock(path, fd, false))){
return false;
}
return (0 < ent->GetOpenCount());
@ -444,7 +445,7 @@ bool FdManager::HasOpenEntityFd(const char* path)
//
int FdManager::GetOpenFdCount(const char* path)
{
AutoLock auto_lock(&FdManager::fd_manager_lock);
const std::lock_guard<std::mutex> lock(FdManager::fd_manager_lock);
return FdManager::singleton.GetPseudoFdCount(path);
}
@ -454,27 +455,7 @@ int FdManager::GetOpenFdCount(const char* path)
//------------------------------------------------
FdManager::FdManager()
{
if(this == FdManager::get()){
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
#if S3FS_PTHREAD_ERRORCHECK
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
int result;
if(0 != (result = pthread_mutex_init(&FdManager::fd_manager_lock, &attr))){
S3FS_PRN_CRIT("failed to init fd_manager_lock: %d", result);
abort();
}
if(0 != (result = pthread_mutex_init(&FdManager::cache_cleanup_lock, &attr))){
S3FS_PRN_CRIT("failed to init cache_cleanup_lock: %d", result);
abort();
}
if(0 != (result = pthread_mutex_init(&FdManager::reserved_diskspace_lock, &attr))){
S3FS_PRN_CRIT("failed to init reserved_diskspace_lock: %d", result);
abort();
}
FdManager::is_lock_init = true;
}else{
if(this != FdManager::get()){
abort();
}
}
@ -482,98 +463,84 @@ FdManager::FdManager()
FdManager::~FdManager()
{
if(this == FdManager::get()){
for(fdent_map_t::iterator iter = fent.begin(); fent.end() != iter; ++iter){
for(auto iter = fent.cbegin(); fent.cend() != iter; ++iter){
FdEntity* ent = (*iter).second.get();
S3FS_PRN_WARN("To exit with the cache file opened: path=%s, refcnt=%d", ent->GetPath().c_str(), ent->GetOpenCount());
}
fent.clear();
if(FdManager::is_lock_init){
int result;
if(0 != (result = pthread_mutex_destroy(&FdManager::fd_manager_lock))){
S3FS_PRN_CRIT("failed to destroy fd_manager_lock: %d", result);
abort();
}
if(0 != (result = pthread_mutex_destroy(&FdManager::cache_cleanup_lock))){
S3FS_PRN_CRIT("failed to destroy cache_cleanup_lock: %d", result);
abort();
}
if(0 != (result = pthread_mutex_destroy(&FdManager::reserved_diskspace_lock))){
S3FS_PRN_CRIT("failed to destroy reserved_diskspace_lock: %d", result);
abort();
}
FdManager::is_lock_init = false;
}
except_fent.clear();
}else{
abort();
}
}
FdEntity* FdManager::GetFdEntity(const char* path, int& existfd, bool newfd, AutoLock::Type locktype)
FdEntity* FdManager::GetFdEntityHasLock(const char* path, int& existfd, bool newfd)
{
S3FS_PRN_INFO3("[path=%s][pseudo_fd=%d]", SAFESTRPTR(path), existfd);
if(!path || '\0' == path[0]){
return nullptr;
}
AutoLock auto_lock(&FdManager::fd_manager_lock, locktype);
fdent_map_t::iterator iter = fent.find(path);
if(fent.end() != iter && iter->second){
UpdateEntityToTempPath();
auto fiter = fent.find(path);
if(fent.cend() != fiter && fiter->second){
if(-1 == existfd){
if(newfd){
existfd = iter->second->OpenPseudoFd(O_RDWR); // [NOTE] O_RDWR flags
existfd = fiter->second->OpenPseudoFd(O_RDWR); // [NOTE] O_RDWR flags
}
return iter->second.get();
}else if(iter->second->FindPseudoFd(existfd)){
if(newfd){
existfd = iter->second->Dup(existfd);
return fiter->second.get();
}else{
if(fiter->second->FindPseudoFd(existfd)){
if(newfd){
existfd = fiter->second->Dup(existfd);
}
return fiter->second.get();
}
return iter->second.get();
}
}
if(-1 != existfd){
for(iter = fent.begin(); iter != fent.end(); ++iter){
if(iter->second && iter->second->FindPseudoFd(existfd)){
for(auto iter = fent.cbegin(); iter != fent.cend(); ++iter){
if(iter->second &&
iter->second->GetROPath() == path &&
iter->second->FindPseudoFd(existfd)){
// found opened fd in map
if(iter->second->GetPath() == path){
if(newfd){
existfd = iter->second->Dup(existfd);
}
return iter->second.get();
if(newfd){
existfd = iter->second->Dup(existfd);
}
// found the fd, but it is used by another file (the file descriptor was recycled),
// so return nullptr.
break;
return iter->second.get();
}
}
}
// If the cache directory is not specified, s3fs opens a temporary file
// when the file is opened.
if(!FdManager::IsCacheDir()){
for(iter = fent.begin(); iter != fent.end(); ++iter){
if(iter->second && iter->second->IsOpen() && iter->second->GetPath() == path){
return iter->second.get();
} else {
// If the cache directory is not specified, s3fs opens a temporary file
// when the file is opened.
if(!FdManager::IsCacheDir()){
for(auto iter = fent.cbegin(); iter != fent.cend(); ++iter){
if(iter->second && iter->second->IsOpen() && iter->second->GetROPath() == path){
return iter->second.get();
}
}
}
}
return nullptr;
}
FdEntity* FdManager::Open(int& fd, const char* path, const headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type)
FdEntity* FdManager::Open(int& fd, const char* path, const headers_t* pmeta, off_t size, const FileTimes& ts_times, int flags, bool force_tmpfile, bool is_create, bool ignore_modify)
{
S3FS_PRN_DBG("[path=%s][size=%lld][ts_mctime=%s][flags=0x%x][force_tmpfile=%s][create=%s][ignore_modify=%s]", SAFESTRPTR(path), static_cast<long long>(size), str(ts_mctime).c_str(), flags, (force_tmpfile ? "yes" : "no"), (is_create ? "yes" : "no"), (ignore_modify ? "yes" : "no"));
S3FS_PRN_DBG("[path=%s][size=%lld][ctime=%s,atime=%s,mtime=%s][flags=0x%x][force_tmpfile=%s][create=%s][ignore_modify=%s]", SAFESTRPTR(path), static_cast<long long>(size), str(ts_times.ctime()).c_str(), str(ts_times.atime()).c_str(), str(ts_times.mtime()).c_str(), flags, (force_tmpfile ? "yes" : "no"), (is_create ? "yes" : "no"), (ignore_modify ? "yes" : "no"));
if(!path || '\0' == path[0]){
return nullptr;
}
AutoLock auto_lock(&FdManager::fd_manager_lock);
const std::lock_guard<std::mutex> lock(FdManager::fd_manager_lock);
UpdateEntityToTempPath();
// search in mapping by key(path)
fdent_map_t::iterator iter = fent.find(path);
auto iter = fent.find(path);
if(fent.end() == iter && !force_tmpfile && !FdManager::IsCacheDir()){
// If the cache directory is not specified, s3fs opens a temporary file
// when the file is opened.
@ -606,7 +573,7 @@ FdEntity* FdManager::Open(int& fd, const char* path, const headers_t* pmeta, off
}
// (re)open
if(0 > (fd = ent->Open(pmeta, size, ts_mctime, flags, type))){
if(0 > (fd = ent->Open(pmeta, size, ts_times, flags))){
S3FS_PRN_ERR("failed to (re)open and create new pseudo fd for path(%s).", path);
return nullptr;
}
@ -620,10 +587,10 @@ FdEntity* FdManager::Open(int& fd, const char* path, const headers_t* pmeta, off
return nullptr;
}
// make new obj
std::unique_ptr<FdEntity> ent(new FdEntity(path, cache_path.c_str()));
auto ent = std::make_shared<FdEntity>(path, cache_path.c_str());
// open
if(0 > (fd = ent->Open(pmeta, size, ts_mctime, flags, type))){
if(0 > (fd = ent->Open(pmeta, size, ts_times, flags))){
S3FS_PRN_ERR("failed to open and create new pseudo fd for path(%s) errno:%d.", path, fd);
return nullptr;
}
@ -656,15 +623,30 @@ FdEntity* FdManager::GetExistFdEntity(const char* path, int existfd)
{
S3FS_PRN_DBG("[path=%s][pseudo_fd=%d]", SAFESTRPTR(path), existfd);
AutoLock auto_lock(&FdManager::fd_manager_lock);
const std::lock_guard<std::mutex> lock(FdManager::fd_manager_lock);
// search all entities.
for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){
if(iter->second && iter->second->FindPseudoFd(existfd)){
// found existfd in entity
return iter->second.get();
UpdateEntityToTempPath();
// If use_cache is disabled, or the disk space is insufficient while use_cache
// is enabled, the entity's key in fent is not the path.
auto iter = fent.find(std::string(path));
if(fent.end() != iter){
if(iter->second && iter->second->FindPseudoFd(existfd)){
return iter->second.get();
}
} else {
// Regardless of whether use_cache is enabled, search all entities for
// one with the same path, and then compare the pseudo fd.
for(iter = fent.begin(); iter != fent.end(); ++iter) {
// GetROPath() holds ro_path_lock rather than fdent_lock.
// Therefore GetExistFdEntity does not contend with FdEntity::Read() / Write().
if(iter->second && (iter->second->GetROPath() == path)
&& iter->second->FindPseudoFd(existfd)) {
return iter->second.get();
}
}
}
// entity not found
return nullptr;
}
@ -674,7 +656,15 @@ FdEntity* FdManager::OpenExistFdEntity(const char* path, int& fd, int flags)
S3FS_PRN_DBG("[path=%s][flags=0x%x]", SAFESTRPTR(path), flags);
// search entity by path, and create pseudo fd
FdEntity* ent = Open(fd, path, nullptr, -1, S3FS_OMIT_TS, flags, false, false, false, AutoLock::NONE);
//
// [NOTE]
// The file timespecs are set to UTIME_OMIT.
// This means that if the file is already open (this method is called
// when the file is expected to be open), the timespecs will not be updated.
// If the file is not open, the current time will be applied.
//
FdEntity* ent = Open(fd, path, nullptr, -1, FileTimes(), flags, false, false, false);
if(!ent){
// Not found entity
return nullptr;
@ -682,10 +672,6 @@ FdEntity* FdManager::OpenExistFdEntity(const char* path, int& fd, int flags)
return ent;
}
// [NOTE]
// Returns the number of open pseudo fd.
// This method is called from GetOpenFdCount method which is already locked.
//
int FdManager::GetPseudoFdCount(const char* path)
{
S3FS_PRN_DBG("[path=%s]", SAFESTRPTR(path));
@ -694,8 +680,10 @@ int FdManager::GetPseudoFdCount(const char* path)
return 0;
}
UpdateEntityToTempPath();
// search all entities.
for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){
for(auto iter = fent.cbegin(); iter != fent.cend(); ++iter){
if(iter->second && iter->second->GetPath() == path){
// found the entity for the path
return iter->second->GetOpenCount();
@ -707,9 +695,11 @@ int FdManager::GetPseudoFdCount(const char* path)
void FdManager::Rename(const std::string &from, const std::string &to)
{
AutoLock auto_lock(&FdManager::fd_manager_lock);
const std::lock_guard<std::mutex> lock(FdManager::fd_manager_lock);
fdent_map_t::iterator iter = fent.find(from);
UpdateEntityToTempPath();
auto iter = fent.find(from);
if(fent.end() == iter && !FdManager::IsCacheDir()){
// If the cache directory is not specified, s3fs opens a temporary file
// when the file is opened.
@ -727,7 +717,7 @@ void FdManager::Rename(const std::string &from, const std::string &to)
// found
S3FS_PRN_DBG("[from=%s][to=%s]", from.c_str(), to.c_str());
std::unique_ptr<FdEntity> ent(std::move(iter->second));
auto ent(std::move(iter->second));
// retrieve old fd entity from map
fent.erase(iter);
@ -751,9 +741,11 @@ bool FdManager::Close(FdEntity* ent, int fd)
if(!ent || -1 == fd){
return true; // returns success
}
AutoLock auto_lock(&FdManager::fd_manager_lock);
const std::lock_guard<std::mutex> lock(FdManager::fd_manager_lock);
for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){
UpdateEntityToTempPath();
for(auto iter = fent.cbegin(); iter != fent.cend(); ++iter){
if(iter->second.get() == ent){
ent->Close(fd);
if(!ent->IsOpen()){
@ -761,7 +753,7 @@ bool FdManager::Close(FdEntity* ent, int fd)
iter = fent.erase(iter);
// check for other key names mapping to this entity, to be on the safe side
for(; iter != fent.end(); ){
for(; iter != fent.cend(); ){
if(iter->second.get() == ent){
iter = fent.erase(iter);
}else{
@ -775,21 +767,60 @@ bool FdManager::Close(FdEntity* ent, int fd)
return false;
}
bool FdManager::ChangeEntityToTempPath(FdEntity* ent, const char* path)
bool FdManager::ChangeEntityToTempPath(std::shared_ptr<FdEntity> ent, const char* path)
{
AutoLock auto_lock(&FdManager::fd_manager_lock);
// [NOTE]
// If the path element does not exist in fent, it may be because a cache directory
// has not been specified, or FdEntity::NoCacheLoadAndPost has already been called.
// In these cases, the path element (=ent) is already registered in fent under a
// temporary path key, so there is no need to register ent in the except_fent map.
// (Processing with UpdateEntityToTempPath should not be performed.)
//
{
const std::lock_guard<std::mutex> lock(FdManager::fd_manager_lock);
for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ){
if(iter->second.get() == ent){
std::string tmppath;
FdManager::MakeRandomTempPath(path, tmppath);
iter->second.reset(ent);
break;
}else{
++iter;
if(fent.cend() == fent.find(path)){
S3FS_PRN_INFO("Already path(%s) element does not exist in fent map.", path);
return false;
}
}
return false;
const std::lock_guard<std::mutex> lock(FdManager::except_entmap_lock);
except_fent[path] = std::move(ent);
return true;
}
bool FdManager::UpdateEntityToTempPath()
{
const std::lock_guard<std::mutex> lock(FdManager::except_entmap_lock);
for(auto except_iter = except_fent.cbegin(); except_iter != except_fent.cend(); ){
std::string tmppath;
FdManager::MakeRandomTempPath(except_iter->first.c_str(), tmppath);
auto iter = fent.find(except_iter->first);
if(fent.cend() != iter && iter->second.get() == except_iter->second.get()){
// Move the entry to the new key
fent[tmppath] = std::move(iter->second);
fent.erase(iter);
except_iter = except_fent.erase(except_iter);
}else{
// [NOTE]
// When ChangeEntityToTempPath is called, the FdEntity pointer it stores in
// except_fent is also mapped in fent, and since this method is always called
// before fent is manipulated, this branch should never be reached.
// If it is reached anyway, output a warning.
//
S3FS_PRN_WARN("For some reason the FdEntity pointer(for %s) is not found in the fent map. Recovery procedures are being performed, but the cause needs to be identified.", except_iter->first.c_str());
// Add the entry for recovery procedures
fent[tmppath] = except_iter->second;
except_iter = except_fent.erase(except_iter);
}
}
return true;
}
void FdManager::CleanupCacheDir()
@ -800,16 +831,15 @@ void FdManager::CleanupCacheDir()
return;
}
AutoLock auto_lock_no_wait(&FdManager::cache_cleanup_lock, AutoLock::NO_WAIT);
if(auto_lock_no_wait.isLockAcquired()){
if(FdManager::cache_cleanup_lock.try_lock()){
//S3FS_PRN_DBG("cache cleanup started");
CleanupCacheDirInternal("");
//S3FS_PRN_DBG("cache cleanup ended");
}else{
// wait for other thread to finish cache cleanup
AutoLock auto_lock(&FdManager::cache_cleanup_lock);
FdManager::cache_cleanup_lock.lock();
}
FdManager::cache_cleanup_lock.unlock();
}
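
CleanupCacheDir() above replaces the AutoLock NO_WAIT idiom with std::mutex::try_lock(): the first caller performs the cleanup, while any concurrent caller simply blocks until that cleanup finishes instead of starting a second pass. A compact sketch of the pattern; DoCleanup is a hypothetical stand-in for CleanupCacheDirInternal:

#include <mutex>

static std::mutex cleanup_lock;

static void DoCleanup()
{
    // hypothetical stand-in for CleanupCacheDirInternal("")
}

// Only one caller runs the cleanup; concurrent callers just wait for it to finish.
static void CleanupOnce()
{
    if(cleanup_lock.try_lock()){
        DoCleanup();
    }else{
        cleanup_lock.lock();              // block until the running cleanup finishes
    }
    cleanup_lock.unlock();                // both branches end with the lock held
}
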
void FdManager::CleanupCacheDirInternal(const std::string &path)
@ -840,16 +870,18 @@ void FdManager::CleanupCacheDirInternal(const std::string &path)
if(S_ISDIR(st.st_mode)){
CleanupCacheDirInternal(next_path);
}else{
AutoLock auto_lock(&FdManager::fd_manager_lock, AutoLock::NO_WAIT);
if (!auto_lock.isLockAcquired()) {
if(!FdManager::fd_manager_lock.try_lock()){
S3FS_PRN_INFO("could not get fd_manager_lock when clean up file(%s), then skip it.", next_path.c_str());
continue;
}
fdent_map_t::iterator iter = fent.find(next_path);
if(fent.end() == iter) {
UpdateEntityToTempPath();
auto iter = fent.find(next_path);
if(fent.cend() == iter) {
S3FS_PRN_DBG("cleaned up: %s", next_path.c_str());
FdManager::DeleteCacheFile(next_path.c_str());
}
FdManager::fd_manager_lock.unlock();
}
}
closedir(dp);
@ -858,8 +890,8 @@ void FdManager::CleanupCacheDirInternal(const std::string &path)
bool FdManager::ReserveDiskSpace(off_t size)
{
if(IsSafeDiskSpace(nullptr, size)){
AutoLock auto_lock(&FdManager::reserved_diskspace_lock);
free_disk_space += size;
const std::lock_guard<std::mutex> lock(FdManager::reserved_diskspace_lock);
FdManager::free_disk_space += size;
return true;
}
return false;
@ -867,8 +899,8 @@ bool FdManager::ReserveDiskSpace(off_t size)
void FdManager::FreeReservedDiskSpace(off_t size)
{
AutoLock auto_lock(&FdManager::reserved_diskspace_lock);
free_disk_space -= size;
const std::lock_guard<std::mutex> lock(FdManager::reserved_diskspace_lock);
FdManager::free_disk_space -= size;
}
//
@ -952,10 +984,12 @@ bool FdManager::RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const
// check if the target file is currently in operation.
{
AutoLock auto_lock(&FdManager::fd_manager_lock);
const std::lock_guard<std::mutex> lock(FdManager::fd_manager_lock);
fdent_map_t::iterator iter = fent.find(object_file_path);
if(fent.end() != iter){
UpdateEntityToTempPath();
auto iter = fent.find(object_file_path);
if(fent.cend() != iter){
// This file is currently open, so we need to output a warning message.
strOpenedWarn = CACHEDBG_FMT_WARN_OPEN;
}
@ -985,7 +1019,7 @@ bool FdManager::RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const
// open cache stat file and load page info.
PageList pagelist;
CacheFileStat cfstat(object_file_path.c_str());
if(!cfstat.ReadOnlyOpen() || !pagelist.Serialize(cfstat, false, cache_file_inode)){
if(!cfstat.ReadOnlyOpen() || !pagelist.Deserialize(cfstat, cache_file_inode)){
++err_file_cnt;
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str());
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD, "Could not load cache file stats information");
@ -1011,14 +1045,14 @@ bool FdManager::RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str());
if(!warn_area_list.empty()){
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_WARN_HEAD);
for(fdpage_list_t::const_iterator witer = warn_area_list.begin(); witer != warn_area_list.end(); ++witer){
for(auto witer = warn_area_list.cbegin(); witer != warn_area_list.cend(); ++witer){
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_PROB_BLOCK, static_cast<size_t>(witer->offset), static_cast<size_t>(witer->bytes));
}
}
if(!err_area_list.empty()){
++err_file_cnt;
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_ERR_HEAD);
for(fdpage_list_t::const_iterator eiter = err_area_list.begin(); eiter != err_area_list.end(); ++eiter){
for(auto eiter = err_area_list.cbegin(); eiter != err_area_list.cend(); ++eiter){
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_PROB_BLOCK, static_cast<size_t>(eiter->offset), static_cast<size_t>(eiter->bytes));
}
}
@ -1045,11 +1079,13 @@ bool FdManager::CheckAllCache()
return false;
}
std::unique_ptr<FILE, decltype(&s3fs_fclose)> pfp(nullptr, &s3fs_fclose);
FILE* fp;
if(FdManager::check_cache_output.empty()){
fp = stdout;
}else{
if(nullptr == (fp = fopen(FdManager::check_cache_output.c_str(), "a+"))){
pfp.reset(fp = fopen(FdManager::check_cache_output.c_str(), "a+"));
if(nullptr == pfp){
S3FS_PRN_ERR("Could not open(create) output file(%s) for checking all cache by errno(%d)", FdManager::check_cache_output.c_str(), errno);
return false;
}
@ -1071,10 +1107,6 @@ bool FdManager::CheckAllCache()
// print foot message
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FOOT, total_file_cnt, err_file_cnt, err_dir_cnt);
if(stdout != fp){
fclose(fp);
}
return result;
}
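The pfp change above swaps manual fclose bookkeeping for an RAII owner that only owns the file it actually opened. A standalone sketch of the same pattern, using plain std::fclose in place of s3fs_fclose:
// RAII FILE* with a custom deleter; stdout is used unowned and never closed.
#include <cstdio>
#include <memory>
#include <string>

int write_report(const std::string& outfile) {
    std::unique_ptr<FILE, decltype(&std::fclose)> owned(nullptr, &std::fclose);
    FILE* fp = stdout;                       // default: unowned stream
    if (!outfile.empty()) {
        owned.reset(std::fopen(outfile.c_str(), "a+"));
        if (!owned) {
            return -1;                       // could not open the output file
        }
        fp = owned.get();
    }
    std::fprintf(fp, "report line\n");
    return 0;                                 // 'owned' closes the file automatically
}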

View File

@ -21,7 +21,12 @@
#ifndef S3FS_FDCACHE_H_
#define S3FS_FDCACHE_H_
#include <mutex>
#include <string>
#include "common.h"
#include "fdcache_entity.h"
#include "s3fs_util.h"
//------------------------------------------------
// class FdManager
@ -30,34 +35,42 @@ class FdManager
{
private:
static FdManager singleton;
static pthread_mutex_t fd_manager_lock;
static pthread_mutex_t cache_cleanup_lock;
static pthread_mutex_t reserved_diskspace_lock;
static bool is_lock_init;
static std::mutex fd_manager_lock;
static std::mutex cache_cleanup_lock;
static std::mutex reserved_diskspace_lock;
static std::mutex except_entmap_lock;
static std::string cache_dir;
static bool check_cache_dir_exist;
static off_t free_disk_space; // limit free disk space
static off_t fake_used_disk_space; // difference between fake free disk space and actual at startup(for test/debug)
static off_t free_disk_space GUARDED_BY(reserved_diskspace_lock); // limit free disk space
static off_t fake_used_disk_space GUARDED_BY(reserved_diskspace_lock); // difference between fake free disk space and actual at startup(for test/debug)
static std::string check_cache_output;
static bool checked_lseek;
static bool have_lseek_hole;
static std::string tmp_dir;
fdent_map_t fent;
fdent_map_t fent GUARDED_BY(fd_manager_lock);
fdent_map_t except_fent GUARDED_BY(except_entmap_lock); // A map of delayed deletion fdentity
private:
static off_t GetFreeDiskSpace(const char* path);
static off_t GetFreeDiskSpaceHasLock(const char* path) REQUIRES(FdManager::reserved_diskspace_lock);
static off_t GetTotalDiskSpace(const char* path);
static bool IsDir(const std::string* dir);
static bool IsDir(const std::string& dir);
static int GetVfsStat(const char* path, struct statvfs* vfsbuf);
static off_t GetEnsureFreeDiskSpaceHasLock() REQUIRES(FdManager::reserved_diskspace_lock);
int GetPseudoFdCount(const char* path);
void CleanupCacheDirInternal(const std::string &path = "");
// Returns the number of open pseudo fds.
int GetPseudoFdCount(const char* path) REQUIRES(fd_manager_lock);
bool UpdateEntityToTempPath() REQUIRES(fd_manager_lock);
void CleanupCacheDirInternal(const std::string &path = "") REQUIRES(cache_cleanup_lock);
bool RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const char* sub_path, int& total_file_cnt, int& err_file_cnt, int& err_dir_cnt);
public:
FdManager();
~FdManager();
FdManager(const FdManager&) = delete;
FdManager(FdManager&&) = delete;
FdManager& operator=(const FdManager&) = delete;
FdManager& operator=(FdManager&&) = delete;
// Reference singleton
static FdManager* get() { return &singleton; }
@ -76,27 +89,34 @@ class FdManager
static bool CheckCacheDirExist();
static bool HasOpenEntityFd(const char* path);
static int GetOpenFdCount(const char* path);
static off_t GetEnsureFreeDiskSpace();
static off_t GetEnsureFreeDiskSpace()
{
const std::lock_guard<std::mutex> lock(FdManager::reserved_diskspace_lock);
return FdManager::GetEnsureFreeDiskSpaceHasLock();
}
static off_t SetEnsureFreeDiskSpace(off_t size);
static bool InitFakeUsedDiskSize(off_t fake_freesize);
static bool IsSafeDiskSpace(const char* path, off_t size);
static bool IsSafeDiskSpaceWithLog(const char* path, off_t size);
static bool IsSafeDiskSpace(const char* path, off_t size, bool withmsg = false);
static void FreeReservedDiskSpace(off_t size);
static bool ReserveDiskSpace(off_t size);
static bool HaveLseekHole();
static bool SetTmpDir(const char* dir);
static bool CheckTmpDirExist();
static FILE* MakeTempFile();
static std::unique_ptr<FILE, decltype(&s3fs_fclose)> MakeTempFile();
static off_t GetTotalDiskSpaceByRatio(int ratio);
// Return FdEntity associated with path, returning nullptr on error. This operation increments the reference count; callers must decrement via Close after use.
FdEntity* GetFdEntity(const char* path, int& existfd, bool newfd = true, AutoLock::Type locktype = AutoLock::NONE);
FdEntity* Open(int& fd, const char* path, const headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type);
FdEntity* GetFdEntity(const char* path, int& existfd, bool newfd = true) {
const std::lock_guard<std::mutex> lock(FdManager::fd_manager_lock);
return GetFdEntityHasLock(path, existfd, newfd);
}
FdEntity* GetFdEntityHasLock(const char* path, int& existfd, bool newfd = true) REQUIRES(FdManager::fd_manager_lock);
FdEntity* Open(int& fd, const char* path, const headers_t* pmeta, off_t size, const FileTimes& ts_times, int flags, bool force_tmpfile, bool is_create, bool ignore_modify);
FdEntity* GetExistFdEntity(const char* path, int existfd = -1);
FdEntity* OpenExistFdEntity(const char* path, int& fd, int flags = O_RDONLY);
void Rename(const std::string &from, const std::string &to);
bool Close(FdEntity* ent, int fd);
bool ChangeEntityToTempPath(FdEntity* ent, const char* path);
bool ChangeEntityToTempPath(std::shared_ptr<FdEntity> ent, const char* path);
void CleanupCacheDir();
bool CheckAllCache();
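The GUARDED_BY/REQUIRES markers in this header come from Clang's thread-safety analysis. A hedged sketch of how such macros are commonly defined and used; the exact definitions live in s3fs's own headers, and whether std::mutex is recognized as a capability depends on the standard library in use:
// Typical Clang thread-safety annotation macros plus a tiny annotated class.
#include <mutex>

#if defined(__clang__)
#define GUARDED_BY(x)   __attribute__((guarded_by(x)))
#define REQUIRES(...)   __attribute__((requires_capability(__VA_ARGS__)))
#else
#define GUARDED_BY(x)
#define REQUIRES(...)
#endif

class Counter {
    mutable std::mutex lock;
    int value GUARDED_BY(lock) = 0;          // -Wthread-safety flags unguarded access

public:
    int GetHasLock() const REQUIRES(lock) { return value; }   // caller must hold 'lock'
    int Get() const {
        std::lock_guard<std::mutex> guard(lock);
        return GetHasLock();                 // OK: lock is held for the whole call
    }
};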

View File

@ -78,11 +78,11 @@ FdEntity* AutoFdEntity::Attach(const char* path, int existfd)
return pFdEntity;
}
FdEntity* AutoFdEntity::Open(const char* path, const headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type, int* error)
FdEntity* AutoFdEntity::Open(const char* path, const headers_t* pmeta, off_t size, const FileTimes& ts_times, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, int* error)
{
Close();
if(nullptr == (pFdEntity = FdManager::get()->Open(pseudo_fd, path, pmeta, size, ts_mctime, flags, force_tmpfile, is_create, ignore_modify, type))){
if(nullptr == (pFdEntity = FdManager::get()->Open(pseudo_fd, path, pmeta, size, ts_times, flags, force_tmpfile, is_create, ignore_modify))){
if(error){
*error = pseudo_fd;
}

View File

@ -23,8 +23,9 @@
#include <fcntl.h>
#include "autolock.h"
#include "common.h"
#include "metaheader.h"
#include "filetimes.h"
class FdEntity;
@ -42,22 +43,20 @@ class AutoFdEntity
FdEntity* pFdEntity;
int pseudo_fd;
private:
public:
AutoFdEntity();
~AutoFdEntity();
AutoFdEntity(const AutoFdEntity&) = delete;
AutoFdEntity(AutoFdEntity&&) = delete;
AutoFdEntity& operator=(const AutoFdEntity&) = delete;
AutoFdEntity& operator=(AutoFdEntity&&) = delete;
public:
AutoFdEntity();
~AutoFdEntity();
bool Close();
int Detach();
FdEntity* Attach(const char* path, int existfd);
int GetPseudoFd() const { return pseudo_fd; }
FdEntity* Open(const char* path, const headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type, int* error = nullptr);
FdEntity* Open(const char* path, const headers_t* pmeta, off_t size, const FileTimes& ts_times, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, int* error = nullptr);
FdEntity* GetExistFdEntity(const char* path, int existfd = -1);
FdEntity* OpenExistFdEntity(const char* path, int flags = O_RDONLY);
};

File diff suppressed because it is too large

View File

@ -21,19 +21,29 @@
#ifndef S3FS_FDCACHE_ENTITY_H_
#define S3FS_FDCACHE_ENTITY_H_
#include <cstdint>
#include <fcntl.h>
#include <memory>
#include <mutex>
#include <string>
#include "autolock.h"
#include "common.h"
#include "fdcache_page.h"
#include "fdcache_fdinfo.h"
#include "fdcache_untreated.h"
#include "metaheader.h"
#include "s3fs_util.h"
#include "filetimes.h"
//----------------------------------------------
// Typedef
//----------------------------------------------
class PseudoFdInfo;
typedef std::map<int, std::unique_ptr<PseudoFdInfo>> fdinfo_map_t;
//------------------------------------------------
// class FdEntity
//------------------------------------------------
class FdEntity
class FdEntity : public std::enable_shared_from_this<FdEntity>
{
private:
// [NOTE]
@ -41,7 +51,7 @@ class FdEntity
// because the processing(request) at these updates is different.
// Therefore, the pending state is expressed by this enum type.
//
enum class pending_status_t {
enum class pending_status_t : uint8_t {
NO_UPDATE_PENDING = 0,
UPDATE_META_PENDING, // pending meta header
CREATE_FILE_PENDING // pending file creation and meta header
@ -50,54 +60,65 @@ class FdEntity
static bool mixmultipart; // whether multipart uploading can use copy api.
static bool streamupload; // whether stream uploading.
mutable pthread_mutex_t fdent_lock;
bool is_lock_init;
std::string path; // object path
int physical_fd; // physical file(cache or temporary file) descriptor
UntreatedParts untreated_list; // list of untreated parts that have been written and not yet uploaded(for streamupload)
fdinfo_map_t pseudo_fd_map; // pseudo file descriptor information map
FILE* pfile; // file pointer(tmp file or cache file)
ino_t inode; // inode number for cache file
headers_t orgmeta; // original headers at opening
off_t size_orgmeta; // original file size in original headers
mutable std::mutex fdent_lock;
std::string path GUARDED_BY(fdent_lock); // object path
int physical_fd GUARDED_BY(fdent_lock); // physical file(cache or temporary file) descriptor
UntreatedParts untreated_list GUARDED_BY(fdent_lock); // list of untreated parts that have been written and not yet uploaded(for streamupload)
fdinfo_map_t pseudo_fd_map GUARDED_BY(fdent_lock); // pseudo file descriptor information map
std::unique_ptr<FILE, decltype(&s3fs_fclose)> pfile GUARDED_BY(fdent_lock) = {nullptr, &s3fs_fclose}; // file pointer(tmp file or cache file)
ino_t inode GUARDED_BY(fdent_lock); // inode number for cache file
headers_t orgmeta GUARDED_BY(fdent_lock); // original headers at opening
off_t size_orgmeta GUARDED_BY(fdent_lock); // original file size in original headers
mutable pthread_mutex_t fdent_data_lock;// protects the following members
PageList pagelist;
std::string cachepath; // local cache file path
// (if this is empty, does not load/save pagelist.)
std::string mirrorpath; // mirror file path to local cache file path
pending_status_t pending_status;// status for new file creation and meta update
struct timespec holding_mtime; // if mtime is updated while the file is open, it is set time_t value
mutable std::mutex fdent_data_lock ACQUIRED_AFTER(fdent_lock); // protects the following members
PageList pagelist GUARDED_BY(fdent_data_lock);
std::string cachepath GUARDED_BY(fdent_data_lock); // local cache file path
// (if this is empty, does not load/save pagelist.)
std::string mirrorpath GUARDED_BY(fdent_data_lock); // mirror file path to local cache file path
pending_status_t pending_status GUARDED_BY(fdent_data_lock); // status for new file creation and meta update
FileTimes timestamps GUARDED_BY(fdent_data_lock); // file timestamps(atime/ctime/mtime)
mutable std::mutex ro_path_lock; // for only the ro_path variable
std::string ro_path GUARDED_BY(ro_path_lock); // holds the same value as "path". this is used as a backup(read-only variable) by special functions only.
private:
static int FillFile(int fd, unsigned char byte, off_t size, off_t start);
static ino_t GetInode(int fd);
void Clear();
ino_t GetInode() const;
int OpenMirrorFile();
int NoCacheLoadAndPost(PseudoFdInfo* pseudo_obj, off_t start = 0, off_t size = 0); // size=0 means loading to end
PseudoFdInfo* CheckPseudoFdFlags(int fd, bool writable, AutoLock::Type locktype = AutoLock::NONE);
bool IsUploading(AutoLock::Type locktype = AutoLock::NONE);
bool SetAllStatus(bool is_loaded); // [NOTE] not locking
bool SetAllStatusUnloaded() { return SetAllStatus(false); }
int NoCachePreMultipartPost(PseudoFdInfo* pseudo_obj);
int NoCacheMultipartPost(PseudoFdInfo* pseudo_obj, int tgfd, off_t start, off_t size);
int NoCacheCompleteMultipartPost(PseudoFdInfo* pseudo_obj);
int RowFlushNoMultipart(const PseudoFdInfo* pseudo_obj, const char* tpath);
int RowFlushMultipart(PseudoFdInfo* pseudo_obj, const char* tpath);
int RowFlushMixMultipart(PseudoFdInfo* pseudo_obj, const char* tpath);
int RowFlushStreamMultipart(PseudoFdInfo* pseudo_obj, const char* tpath);
ssize_t WriteNoMultipart(const PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size);
ssize_t WriteMultipart(PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size);
ssize_t WriteMixMultipart(PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size);
ssize_t WriteStreamUpload(PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size);
ino_t GetInode() const REQUIRES(FdEntity::fdent_data_lock);
int OpenMirrorFile() REQUIRES(FdEntity::fdent_data_lock);
int NoCacheLoadAndPost(PseudoFdInfo* pseudo_obj, off_t start = 0, off_t size = 0) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock); // size=0 means loading to end
PseudoFdInfo* CheckPseudoFdFlags(int fd, bool writable) REQUIRES(FdEntity::fdent_lock);
bool IsUploading() REQUIRES(FdEntity::fdent_lock);
int SetCtimeHasLock(struct timespec time) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
int SetAtimeHasLock(struct timespec time) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
int SetMtimeHasLock(struct timespec time) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
int SetFileTimesHasLock(const FileTimes& ts_times) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
bool SetAllStatus(bool is_loaded) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
bool SetAllStatusUnloaded() REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock) { return SetAllStatus(false); }
int PreMultipartUploadRequest(PseudoFdInfo* pseudo_obj) REQUIRES(FdEntity::fdent_lock, fdent_data_lock);
int NoCachePreMultipartUploadRequest(PseudoFdInfo* pseudo_obj) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
int NoCacheMultipartUploadRequest(PseudoFdInfo* pseudo_obj, int tgfd, off_t start, off_t size) REQUIRES(FdEntity::fdent_lock);
int NoCacheMultipartUploadComplete(PseudoFdInfo* pseudo_obj) REQUIRES(FdEntity::fdent_lock);
int RowFlushHasLock(int fd, const char* tpath, bool force_sync) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
int RowFlushNoMultipart(const PseudoFdInfo* pseudo_obj, const char* tpath) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
int RowFlushMultipart(PseudoFdInfo* pseudo_obj, const char* tpath) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
int RowFlushMixMultipart(PseudoFdInfo* pseudo_obj, const char* tpath) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
int RowFlushStreamMultipart(PseudoFdInfo* pseudo_obj, const char* tpath) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
ssize_t WriteNoMultipart(const PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
ssize_t WriteMultipart(PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
ssize_t WriteMixMultipart(PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
ssize_t WriteStreamUpload(PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
bool ReserveDiskSpace(off_t size);
int UploadPendingHasLock(int fd) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
bool AddUntreated(off_t start, off_t size);
bool ReserveDiskSpace(off_t size) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
bool IsDirtyMetadata() const;
bool AddUntreated(off_t start, off_t size) REQUIRES(FdEntity::fdent_lock);
bool IsDirtyMetadata() const REQUIRES(FdEntity::fdent_data_lock);
std::shared_ptr<FdEntity> get_shared_ptr() { return shared_from_this(); }
public:
static bool GetNoMixMultipart() { return mixmultipart; }
@ -113,43 +134,108 @@ class FdEntity
FdEntity& operator=(FdEntity&&) = delete;
void Close(int fd);
bool IsOpen() const { return (-1 != physical_fd); }
bool FindPseudoFd(int fd, AutoLock::Type locktype = AutoLock::NONE) const;
int Open(const headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, AutoLock::Type type);
bool LoadAll(int fd, headers_t* pmeta = nullptr, off_t* size = nullptr, bool force_load = false);
int Dup(int fd, AutoLock::Type locktype = AutoLock::NONE);
int OpenPseudoFd(int flags = O_RDONLY, AutoLock::Type locktype = AutoLock::NONE);
int GetOpenCount(AutoLock::Type locktype = AutoLock::NONE) const;
const std::string& GetPath() const { return path; }
bool IsOpen() const {
const std::lock_guard<std::mutex> lock(fdent_lock);
return (-1 != physical_fd);
}
bool FindPseudoFd(int fd) const {
const std::lock_guard<std::mutex> lock(fdent_lock);
return FindPseudoFdWithLock(fd);
}
bool FindPseudoFdWithLock(int fd) const REQUIRES(FdEntity::fdent_lock);
std::string GetROPath() const {
const std::lock_guard<std::mutex> ro_lock(ro_path_lock);
return ro_path;
}
int Open(const headers_t* pmeta, off_t size, const FileTimes& ts_times, int flags);
bool LoadAll(int fd, off_t* size = nullptr, bool force_load = false);
int Dup(int fd) {
const std::lock_guard<std::mutex> lock(fdent_lock);
return DupWithLock(fd);
}
int DupWithLock(int fd) REQUIRES(FdEntity::fdent_lock);
int OpenPseudoFd(int flags = O_RDONLY);
int GetOpenCount() const {
const std::lock_guard<std::mutex> lock(fdent_lock);
return GetOpenCountHasLock();
}
int GetOpenCountHasLock() const REQUIRES(FdEntity::fdent_lock);
std::string GetPath() const
{
const std::lock_guard<std::mutex> lock(fdent_lock);
return path;
}
bool RenamePath(const std::string& newpath, std::string& fentmapkey);
int GetPhysicalFd() const { return physical_fd; }
int GetPhysicalFd() const REQUIRES(FdEntity::fdent_lock) { return physical_fd; }
bool IsModified() const;
bool MergeOrgMeta(headers_t& updatemeta);
int UploadPending(int fd, AutoLock::Type type);
bool GetOrgMeta(headers_t& meta) const;
int UploadPending(int fd) {
const std::lock_guard<std::mutex> lock(fdent_lock);
const std::lock_guard<std::mutex> lock_data(fdent_data_lock);
return UploadPendingHasLock(fd);
}
bool HaveUploadPending(){
const std::lock_guard<std::mutex> lock_data(fdent_data_lock);
return (pending_status_t::NO_UPDATE_PENDING != pending_status);
}
bool GetStats(struct stat& st) const {
const std::lock_guard<std::mutex> lock(fdent_lock);
return GetStatsHasLock(st);
}
bool GetStatsHasLock(struct stat& st) const REQUIRES(FdEntity::fdent_lock);
int SetCtime(struct timespec time) {
const std::lock_guard<std::mutex> lock(fdent_lock);
const std::lock_guard<std::mutex> lock2(fdent_data_lock);
return SetCtimeHasLock(time);
}
int SetAtime(struct timespec time) {
const std::lock_guard<std::mutex> lock(fdent_lock);
const std::lock_guard<std::mutex> lock2(fdent_data_lock);
return SetAtimeHasLock(time);
}
int SetMtime(struct timespec time) {
const std::lock_guard<std::mutex> lock(fdent_lock);
const std::lock_guard<std::mutex> lock2(fdent_data_lock);
return SetMtimeHasLock(time);
}
bool GetStats(struct stat& st, AutoLock::Type locktype = AutoLock::NONE) const;
int SetCtime(struct timespec time, AutoLock::Type locktype = AutoLock::NONE);
int SetAtime(struct timespec time, AutoLock::Type locktype = AutoLock::NONE);
int SetMCtime(struct timespec mtime, struct timespec ctime, AutoLock::Type locktype = AutoLock::NONE);
bool UpdateCtime();
bool UpdateAtime();
bool UpdateMtime(bool clear_holding_mtime = false);
bool UpdateMCtime();
bool SetHoldingMtime(struct timespec mtime, AutoLock::Type locktype = AutoLock::NONE);
bool ClearHoldingMtime(AutoLock::Type locktype = AutoLock::NONE);
bool GetSize(off_t& size) const;
bool GetXattr(std::string& xattr) const;
bool SetXattr(const std::string& xattr);
bool SetMode(mode_t mode);
bool SetUId(uid_t uid);
bool SetGId(gid_t gid);
bool SetMode(mode_t mode) {
const std::lock_guard<std::mutex> lock(fdent_lock);
return SetModeHasLock(mode);
}
bool SetModeHasLock(mode_t mode) REQUIRES(FdEntity::fdent_lock);
bool SetUId(uid_t uid) {
const std::lock_guard<std::mutex> lock(fdent_lock);
return SetUIdHasLock(uid);
}
bool SetUIdHasLock(uid_t uid) REQUIRES(FdEntity::fdent_lock);
bool SetGId(gid_t gid) {
const std::lock_guard<std::mutex> lock(fdent_lock);
return SetGIdHasLock(gid);
}
bool SetGIdHasLock(gid_t gid) REQUIRES(FdEntity::fdent_lock);
bool SetContentType(const char* path);
bool GetStatsFromMeta(struct stat& st) const;
int Load(off_t start, off_t size, AutoLock::Type type, bool is_modified_flag = false); // size=0 means loading to end
int Load(off_t start, off_t size, bool is_modified_flag = false) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock); // size=0 means loading to end
off_t BytesModified();
int RowFlush(int fd, const char* tpath, AutoLock::Type type, bool force_sync = false);
int Flush(int fd, AutoLock::Type type, bool force_sync = false) { return RowFlush(fd, nullptr, type, force_sync); }
int RowFlush(int fd, const char* tpath, bool force_sync = false) {
const std::lock_guard<std::mutex> lock(fdent_lock);
const std::lock_guard<std::mutex> lock_data(fdent_data_lock);
return RowFlushHasLock(fd, tpath, force_sync);
}
int Flush(int fd, bool force_sync = false) {
return RowFlush(fd, nullptr, force_sync);
}
ssize_t Read(int fd, char* bytes, off_t start, size_t size, bool force_load = false);
ssize_t Write(int fd, const char* bytes, off_t start, size_t size);
@ -160,11 +246,14 @@ class FdEntity
bool IsDirtyNewFile() const;
void MarkDirtyMetadata();
bool GetLastUpdateUntreatedPart(off_t& start, off_t& size) const;
bool ReplaceLastUpdateUntreatedPart(off_t front_start, off_t front_size, off_t behind_start, off_t behind_size);
bool GetLastUpdateUntreatedPart(off_t& start, off_t& size) const REQUIRES(FdEntity::fdent_lock);
bool ReplaceLastUpdateUntreatedPart(off_t front_start, off_t front_size, off_t behind_start, off_t behind_size) REQUIRES(FdEntity::fdent_lock);
// Intentionally unimplemented -- for lock checking only.
std::mutex* GetMutex() RETURN_CAPABILITY(fdent_lock);
};
typedef std::map<std::string, std::unique_ptr<FdEntity>> fdent_map_t; // key=path, value=FdEntity*
typedef std::map<std::string, std::shared_ptr<FdEntity>> fdent_map_t; // key=path, value=FdEntity
#endif // S3FS_FDCACHE_ENTITY_H_
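The last two lines above switch fdent_map_t to shared ownership, and FdEntity now derives from enable_shared_from_this. A small illustrative sketch (hypothetical Entity type, not the real FdEntity) of what that buys:
// Both the map and any holder of self() keep the object alive, so erasing the
// map entry no longer destroys an entity that is still in use elsewhere.
#include <cstdio>
#include <map>
#include <memory>
#include <string>

class Entity : public std::enable_shared_from_this<Entity> {
public:
    // Only valid if the object is already managed by a shared_ptr, as the map entries are.
    std::shared_ptr<Entity> self() { return shared_from_this(); }
};

using entity_map_t = std::map<std::string, std::shared_ptr<Entity>>;

int main() {
    entity_map_t fent;
    fent["/a"] = std::make_shared<Entity>();
    std::shared_ptr<Entity> keep = fent["/a"]->self();
    fent.erase("/a");                                  // 'keep' still owns the entity here
    std::printf("use_count=%ld\n", keep.use_count());  // prints 1
    return 0;
}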

View File

@ -23,10 +23,13 @@
#include <cstdio>
#include <cstdlib>
#include <memory>
#include <mutex>
#include <string>
#include <sys/stat.h>
#include <unistd.h>
#include "common.h"
#include "s3fs.h"
#include "s3fs_logger.h"
#include "s3fs_util.h"
#include "fdcache_fdinfo.h"
@ -35,90 +38,13 @@
#include "curl.h"
#include "string_util.h"
#include "threadpoolman.h"
//------------------------------------------------
// PseudoFdInfo class variables
//------------------------------------------------
int PseudoFdInfo::max_threads = -1;
int PseudoFdInfo::opt_max_threads = -1;
//------------------------------------------------
// PseudoFdInfo class methods
//------------------------------------------------
//
// Worker function for uploading
//
void* PseudoFdInfo::MultipartUploadThreadWorker(void* arg)
{
std::unique_ptr<pseudofdinfo_thparam> pthparam(static_cast<pseudofdinfo_thparam*>(arg));
if(!pthparam || !(pthparam->ppseudofdinfo)){
return reinterpret_cast<void*>(-EIO);
}
S3FS_PRN_INFO3("Upload Part Thread [tpath=%s][start=%lld][size=%lld][part=%d]", pthparam->path.c_str(), static_cast<long long>(pthparam->start), static_cast<long long>(pthparam->size), pthparam->part_num);
int result;
{
AutoLock auto_lock(&(pthparam->ppseudofdinfo->upload_list_lock));
if(0 != (result = pthparam->ppseudofdinfo->last_result)){
S3FS_PRN_DBG("Already occurred error, thus this thread worker is exiting.");
if(!pthparam->ppseudofdinfo->CompleteInstruction(result, AutoLock::ALREADY_LOCKED)){ // result will be overwritten with the same value.
result = -EIO;
}
return reinterpret_cast<void*>(result);
}
}
// setup and make curl object
std::unique_ptr<S3fsCurl> s3fscurl(S3fsCurl::CreateParallelS3fsCurl(pthparam->path.c_str(), pthparam->upload_fd, pthparam->start, pthparam->size, pthparam->part_num, pthparam->is_copy, pthparam->petag, pthparam->upload_id, result));
if(nullptr == s3fscurl){
S3FS_PRN_ERR("failed creating s3fs curl object for uploading [path=%s][start=%lld][size=%lld][part=%d]", pthparam->path.c_str(), static_cast<long long>(pthparam->start), static_cast<long long>(pthparam->size), pthparam->part_num);
// set result for exiting
if(!pthparam->ppseudofdinfo->CompleteInstruction(result, AutoLock::NONE)){
result = -EIO;
}
return reinterpret_cast<void*>(result);
}
// Send request and get result
if(0 == (result = s3fscurl->RequestPerform())){
S3FS_PRN_DBG("succeed uploading [path=%s][start=%lld][size=%lld][part=%d]", pthparam->path.c_str(), static_cast<long long>(pthparam->start), static_cast<long long>(pthparam->size), pthparam->part_num);
if(!s3fscurl->MixMultipartPostComplete()){
S3FS_PRN_ERR("failed completion uploading [path=%s][start=%lld][size=%lld][part=%d]", pthparam->path.c_str(), static_cast<long long>(pthparam->start), static_cast<long long>(pthparam->size), pthparam->part_num);
result = -EIO;
}
}else{
S3FS_PRN_ERR("failed uploading with error(%d) [path=%s][start=%lld][size=%lld][part=%d]", result, pthparam->path.c_str(), static_cast<long long>(pthparam->start), static_cast<long long>(pthparam->size), pthparam->part_num);
}
s3fscurl->DestroyCurlHandle(true, false);
// set result
if(!pthparam->ppseudofdinfo->CompleteInstruction(result, AutoLock::NONE)){
S3FS_PRN_WARN("This thread worker is about to end, so it doesn't return an EIO here and runs to the end.");
}
return reinterpret_cast<void*>(result);
}
#include "s3fs_threadreqs.h"
//------------------------------------------------
// PseudoFdInfo methods
//------------------------------------------------
PseudoFdInfo::PseudoFdInfo(int fd, int open_flags) : pseudo_fd(-1), physical_fd(fd), flags(0), upload_fd(-1), uploaded_sem(0), instruct_count(0), completed_count(0), last_result(0)
PseudoFdInfo::PseudoFdInfo(int fd, int open_flags) : pseudo_fd(-1), physical_fd(fd), flags(0), upload_fd(-1), instruct_count(0), last_result(0), uploaded_sem(0)
{
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
#if S3FS_PTHREAD_ERRORCHECK
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
int result;
if(0 != (result = pthread_mutex_init(&upload_list_lock, &attr))){
S3FS_PRN_CRIT("failed to init upload_list_lock: %d", result);
abort();
}
is_lock_init = true;
if(-1 != physical_fd){
pseudo_fd = PseudoFdManager::Get();
flags = open_flags;
@ -127,25 +53,24 @@ PseudoFdInfo::PseudoFdInfo(int fd, int open_flags) : pseudo_fd(-1), physical_fd(
PseudoFdInfo::~PseudoFdInfo()
{
Clear(); // call before destrying the mutex
if(is_lock_init){
int result;
if(0 != (result = pthread_mutex_destroy(&upload_list_lock))){
S3FS_PRN_CRIT("failed to destroy upload_list_lock: %d", result);
abort();
}
is_lock_init = false;
}
Clear(); // call before destroying the mutex
}
bool PseudoFdInfo::Clear()
{
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownConditionTrueFalse
if(!CancelAllThreads() || !ResetUploadInfo(AutoLock::NONE)){
if(!CancelAllThreads()){
return false;
}
{
const std::lock_guard<std::mutex> lock(upload_list_lock);
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownConditionTrueFalse
if(!ResetUploadInfo()){
return false;
}
}
CloseUploadFd();
if(-1 != pseudo_fd){
@ -157,18 +82,29 @@ bool PseudoFdInfo::Clear()
return true;
}
bool PseudoFdInfo::IsUploadingHasLock() const
{
return !upload_id.empty();
}
bool PseudoFdInfo::IsUploading() const
{
const std::lock_guard<std::mutex> lock(upload_list_lock);
return IsUploadingHasLock();
}
void PseudoFdInfo::CloseUploadFd()
{
AutoLock auto_lock(&upload_list_lock);
const std::lock_guard<std::mutex> lock(upload_list_lock);
if(-1 != upload_fd){
close(upload_fd);
}
}
bool PseudoFdInfo::OpenUploadFd(AutoLock::Type type)
bool PseudoFdInfo::OpenUploadFd()
{
AutoLock auto_lock(&upload_list_lock, type);
const std::lock_guard<std::mutex> lock(upload_list_lock);
if(-1 != upload_fd){
// already initialized
@ -244,29 +180,23 @@ bool PseudoFdInfo::ClearUploadInfo(bool is_cancel_mp)
return false;
}
}
return ResetUploadInfo(AutoLock::NONE);
const std::lock_guard<std::mutex> lock(upload_list_lock);
return ResetUploadInfo();
}
bool PseudoFdInfo::ResetUploadInfo(AutoLock::Type type)
bool PseudoFdInfo::ResetUploadInfo()
{
AutoLock auto_lock(&upload_list_lock, type);
upload_id.erase();
upload_id.clear();
upload_list.clear();
instruct_count = 0;
completed_count = 0;
last_result = 0;
return true;
}
bool PseudoFdInfo::RowInitialUploadInfo(const std::string& id, bool is_cancel_mp, AutoLock::Type type)
bool PseudoFdInfo::RowInitialUploadInfo(const std::string& id, bool is_cancel_mp)
{
if(is_cancel_mp && AutoLock::ALREADY_LOCKED == type){
S3FS_PRN_ERR("Internal Error: Could not call this with type=AutoLock::ALREADY_LOCKED and is_cancel_mp=true");
return false;
}
if(is_cancel_mp){
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownConditionTrueFalse
@ -274,57 +204,55 @@ bool PseudoFdInfo::RowInitialUploadInfo(const std::string& id, bool is_cancel_mp
return false;
}
}else{
const std::lock_guard<std::mutex> lock(upload_list_lock);
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownConditionTrueFalse
if(!ResetUploadInfo(type)){
if(!ResetUploadInfo()){
return false;
}
}
AutoLock auto_lock(&upload_list_lock, type);
const std::lock_guard<std::mutex> lock(upload_list_lock);
upload_id = id;
return true;
}
bool PseudoFdInfo::CompleteInstruction(int result, AutoLock::Type type)
void PseudoFdInfo::IncreaseInstructionCount()
{
AutoLock auto_lock(&upload_list_lock, type);
const std::lock_guard<std::mutex> lock(upload_list_lock);
++instruct_count;
}
if(0 != result){
last_result = result;
}
bool PseudoFdInfo::GetUploadInfo(std::string& id, int& fd) const
{
const std::lock_guard<std::mutex> lock(upload_list_lock);
if(0 >= instruct_count){
S3FS_PRN_ERR("Internal error: instruct_count caused an underflow.");
if(!IsUploadingHasLock()){
S3FS_PRN_ERR("Multipart Upload has not started yet.");
return false;
}
--instruct_count;
++completed_count;
id = upload_id;
fd = upload_fd;
return true;
}
bool PseudoFdInfo::GetUploadId(std::string& id) const
{
if(!IsUploading()){
S3FS_PRN_ERR("Multipart Upload has not started yet.");
return false;
}
id = upload_id;
return true;
int fd = -1;
return GetUploadInfo(id, fd);
}
bool PseudoFdInfo::GetEtaglist(etaglist_t& list) const
{
if(!IsUploading()){
const std::lock_guard<std::mutex> lock(upload_list_lock);
if(!IsUploadingHasLock()){
S3FS_PRN_ERR("Multipart Upload has not started yet.");
return false;
}
AutoLock auto_lock(&upload_list_lock);
list.clear();
for(filepart_list_t::const_iterator iter = upload_list.begin(); iter != upload_list.end(); ++iter){
for(auto iter = upload_list.cbegin(); iter != upload_list.cend(); ++iter){
if(iter->petag){
list.push_back(*(iter->petag));
}else{
@ -344,12 +272,13 @@ bool PseudoFdInfo::GetEtaglist(etaglist_t& list) const
//
bool PseudoFdInfo::AppendUploadPart(off_t start, off_t size, bool is_copy, etagpair** ppetag)
{
if(!IsUploading()){
const std::lock_guard<std::mutex> lock(upload_list_lock);
if(!IsUploadingHasLock()){
S3FS_PRN_ERR("Multipart Upload has not started yet.");
return false;
}
AutoLock auto_lock(&upload_list_lock);
off_t next_start_pos = 0;
if(!upload_list.empty()){
next_start_pos = upload_list.back().startpos + upload_list.back().size;
@ -382,11 +311,13 @@ static bool filepart_partnum_compare(const filepart& src1, const filepart& src2)
return src1.get_part_number() < src2.get_part_number();
}
bool PseudoFdInfo::InsertUploadPart(off_t start, off_t size, int part_num, bool is_copy, etagpair** ppetag, AutoLock::Type type)
bool PseudoFdInfo::InsertUploadPart(off_t start, off_t size, int part_num, bool is_copy, etagpair** ppetag)
{
const std::lock_guard<std::mutex> lock(upload_list_lock);
//S3FS_PRN_DBG("[start=%lld][size=%lld][part_num=%d][is_copy=%s]", static_cast<long long int>(start), static_cast<long long int>(size), part_num, (is_copy ? "true" : "false"));
if(!IsUploading()){
if(!IsUploadingHasLock()){
S3FS_PRN_ERR("Multipart Upload has not started yet.");
return false;
}
@ -395,8 +326,6 @@ bool PseudoFdInfo::InsertUploadPart(off_t start, off_t size, int part_num, bool
return false;
}
AutoLock auto_lock(&upload_list_lock, type);
// insert new part
etagpair* petag_entity = etag_entities.add(etagpair(nullptr, part_num));
upload_list.emplace_back(false, physical_fd, start, size, is_copy, petag_entity);
@ -410,57 +339,44 @@ bool PseudoFdInfo::InsertUploadPart(off_t start, off_t size, int part_num, bool
return true;
}
// [NOTE]
// This method only launches the upload thread.
// Check the maximum number of threads before calling.
//
bool PseudoFdInfo::ParallelMultipartUpload(const char* path, const mp_part_list_t& mplist, bool is_copy, AutoLock::Type type)
bool PseudoFdInfo::ParallelMultipartUpload(const char* path, const mp_part_list_t& mplist, bool is_copy)
{
//S3FS_PRN_DBG("[path=%s][mplist(%zu)]", SAFESTRPTR(path), mplist.size());
AutoLock auto_lock(&upload_list_lock, type);
if(mplist.empty()){
// nothing to do
return true;
}
if(!OpenUploadFd(AutoLock::ALREADY_LOCKED)){
if(!OpenUploadFd()){
return false;
}
for(mp_part_list_t::const_iterator iter = mplist.begin(); iter != mplist.end(); ++iter){
// Get upload id/fd before loop
std::string tmp_upload_id;
int tmp_upload_fd = -1;
if(!GetUploadInfo(tmp_upload_id, tmp_upload_fd)){
return false;
}
std::string strpath = SAFESTRPTR(path);
for(auto iter = mplist.cbegin(); iter != mplist.cend(); ++iter){
// Insert upload part
etagpair* petag = nullptr;
if(!InsertUploadPart(iter->start, iter->size, iter->part_num, is_copy, &petag, AutoLock::ALREADY_LOCKED)){
S3FS_PRN_ERR("Failed to insert insert upload part(path=%s, start=%lld, size=%lld, part=%d, copy=%s) to mplist", SAFESTRPTR(path), static_cast<long long int>(iter->start), static_cast<long long int>(iter->size), iter->part_num, (is_copy ? "true" : "false"));
if(!InsertUploadPart(iter->start, iter->size, iter->part_num, is_copy, &petag)){
S3FS_PRN_ERR("Failed to insert Multipart Upload Part to mplist [path=%s][start=%lld][size=%lld][part_num=%d][is_copy=%s]", strpath.c_str(), static_cast<long long int>(iter->start), static_cast<long long int>(iter->size), iter->part_num, (is_copy ? "true" : "false"));
return false;
}
// make parameter for my thread
pseudofdinfo_thparam* thargs = new pseudofdinfo_thparam;
thargs->ppseudofdinfo = this;
thargs->path = SAFESTRPTR(path);
thargs->upload_id = upload_id;
thargs->upload_fd = upload_fd;
thargs->start = iter->start;
thargs->size = iter->size;
thargs->is_copy = is_copy;
thargs->part_num = iter->part_num;
thargs->petag = petag;
// make parameter for thread pool
std::unique_ptr<thpoolman_param> ppoolparam(new thpoolman_param);
ppoolparam->args = thargs;
ppoolparam->psem = &uploaded_sem;
ppoolparam->pfunc = PseudoFdInfo::MultipartUploadThreadWorker;
// setup instruction
if(!ThreadPoolMan::Instruct(std::move(ppoolparam))){
S3FS_PRN_ERR("failed setup instruction for uploading.");
delete thargs;
// setup instruction and request on another thread
int result;
if(0 != (result = multipart_upload_part_request(strpath, tmp_upload_fd, iter->start, iter->size, iter->part_num, tmp_upload_id, petag, is_copy, &uploaded_sem, &upload_list_lock, &last_result))){
S3FS_PRN_ERR("failed setup instruction for Multipart Upload Part Request by error(%d) [path=%s][start=%lld][size=%lld][part_num=%d][is_copy=%s]", result, strpath.c_str(), static_cast<long long int>(iter->start), static_cast<long long int>(iter->size), iter->part_num, (is_copy ? "true" : "false"));
return false;
}
++instruct_count;
// Count up the number of internally managed threads
IncreaseInstructionCount();
}
return true;
}
@ -471,11 +387,10 @@ bool PseudoFdInfo::ParallelMultipartUploadAll(const char* path, const mp_part_li
result = 0;
if(!OpenUploadFd(AutoLock::NONE)){
if(!OpenUploadFd()){
return false;
}
if(!ParallelMultipartUpload(path, to_upload_list, false, AutoLock::NONE) || !ParallelMultipartUpload(path, copy_list, true, AutoLock::NONE)){
if(!ParallelMultipartUpload(path, to_upload_list, false) || !ParallelMultipartUpload(path, copy_list, true)){
S3FS_PRN_ERR("Failed setup instruction for uploading(path=%s, to_upload_list=%zu, copy_list=%zu).", SAFESTRPTR(path), to_upload_list.size(), copy_list.size());
return false;
}
@ -486,6 +401,31 @@ bool PseudoFdInfo::ParallelMultipartUploadAll(const char* path, const mp_part_li
return true;
}
//
// Common method that calls S3fsCurl::PreMultipartUploadRequest via pre_multipart_upload_request
//
// [NOTE]
// If the request is successful, initialize upload_id.
//
int PseudoFdInfo::PreMultipartUploadRequest(const std::string& strpath, const headers_t& meta)
{
// get upload_id
std::string new_upload_id;
int result;
if(0 != (result = pre_multipart_upload_request(strpath, meta, new_upload_id))){
return result;
}
// reset upload_id
if(!RowInitialUploadInfo(new_upload_id, false/* not need to cancel */)){
S3FS_PRN_ERR("failed to setup multipart upload(set upload id to object)");
return -EIO;
}
S3FS_PRN_DBG("succeed to setup multipart upload(set upload id to object)");
return 0;
}
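For context, PreMultipartUploadRequest only covers the first step of the multipart lifecycle. A hedged, generic sketch with hypothetical helper names (stubs standing in for the real request layer, not s3fs functions) of how the upload_id threads through initiate, per-part upload, and complete:
// Generic multipart-upload flow: initiate -> upload each part -> complete.
#include <cstddef>
#include <string>
#include <vector>

struct PartEtag { int part_num; std::string etag; };

// Stand-ins for the real HTTP requests.
static std::string initiate_multipart(const std::string&) { return "upload-id-123"; }
static PartEtag upload_part(const std::string&, const std::string&, int part_num,
                            const char*, size_t) { return {part_num, "etag"}; }
static bool complete_multipart(const std::string&, const std::string&,
                               const std::vector<PartEtag>&) { return true; }

bool upload_in_parts(const std::string& key, const char* data, size_t size, size_t part_size) {
    const std::string upload_id = initiate_multipart(key);   // the PreMultipartUploadRequest step
    std::vector<PartEtag> parts;
    int part_num = 1;
    for (size_t off = 0; off < size; off += part_size, ++part_num) {
        const size_t len = (size - off < part_size) ? (size - off) : part_size;
        parts.push_back(upload_part(key, upload_id, part_num, data + off, len));  // per-part request
    }
    return complete_multipart(key, upload_id, parts);         // commit the whole object
}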
//
// Upload the last updated Untreated area
//
@ -511,7 +451,6 @@ ssize_t PseudoFdInfo::UploadBoundaryLastUntreatedArea(const char* path, headers_
S3FS_PRN_ERR("pseudo_fd(%d) to physical_fd(%d) for path(%s) is not opened or not writable, or pfdent is nullptr.", pseudo_fd, physical_fd, path);
return -EBADF;
}
AutoLock auto_lock(&upload_list_lock);
//
// Get last update untreated area
@ -549,7 +488,7 @@ ssize_t PseudoFdInfo::UploadBoundaryLastUntreatedArea(const char* path, headers_
// Get the area for uploading, if last update treated area can be uploaded.
//
// [NOTE]
// * Create the updoad area list, if the untreated area aligned with the boundary
// * Create the upload area list, if the untreated area aligned with the boundary
// exceeds the maximum upload size.
// * If it overlaps with an area that has already been uploaded(unloaded list),
// that area is added to the cancellation list and included in the untreated area.
@ -569,17 +508,9 @@ ssize_t PseudoFdInfo::UploadBoundaryLastUntreatedArea(const char* path, headers_
// Has multipart uploading already started?
//
if(!IsUploading()){
// Multipart uploading hasn't started yet, so start it.
//
S3fsCurl s3fscurl(true);
std::string tmp_upload_id;
std::string strpath = SAFESTRPTR(path);
int result;
if(0 != (result = s3fscurl.PreMultipartPostRequest(path, meta, tmp_upload_id, true))){
S3FS_PRN_ERR("failed to setup multipart upload(create upload id) by errno(%d)", result);
return result;
}
if(!RowInitialUploadInfo(tmp_upload_id, false/* not need to cancel */, AutoLock::ALREADY_LOCKED)){
S3FS_PRN_ERR("failed to setup multipart upload(set upload id to object)");
if(0 != (result = PreMultipartUploadRequest(strpath, meta))){
return result;
}
}
@ -590,7 +521,7 @@ ssize_t PseudoFdInfo::UploadBoundaryLastUntreatedArea(const char* path, headers_
// When canceling(overwriting) a part that has already been uploaded, output it.
//
if(S3fsLog::IsS3fsLogDbg()){
for(filepart_list_t::const_iterator cancel_iter = cancel_uploaded_list.begin(); cancel_iter != cancel_uploaded_list.end(); ++cancel_iter){
for(auto cancel_iter = cancel_uploaded_list.cbegin(); cancel_iter != cancel_uploaded_list.cend(); ++cancel_iter){
S3FS_PRN_DBG("Cancel uploaded: start(%lld), size(%lld), part number(%d)", static_cast<long long int>(cancel_iter->startpos), static_cast<long long int>(cancel_iter->size), (cancel_iter->petag ? cancel_iter->petag->part_num : -1));
}
}
@ -598,7 +529,7 @@ ssize_t PseudoFdInfo::UploadBoundaryLastUntreatedArea(const char* path, headers_
//
// Upload Multipart parts
//
if(!ParallelMultipartUpload(path, to_upload_list, false, AutoLock::ALREADY_LOCKED)){
if(!ParallelMultipartUpload(path, to_upload_list, false)){
S3FS_PRN_ERR("Failed to upload multipart parts.");
return -EIO;
}
@ -621,8 +552,8 @@ int PseudoFdInfo::WaitAllThreadsExit()
int result;
bool is_loop = true;
{
AutoLock auto_lock(&upload_list_lock);
if(0 == instruct_count && 0 == completed_count){
const std::lock_guard<std::mutex> lock(upload_list_lock);
if(0 == instruct_count){
result = last_result;
is_loop = false;
}
@ -630,13 +561,10 @@ int PseudoFdInfo::WaitAllThreadsExit()
while(is_loop){
// need to wait the worker exiting
uploaded_sem.wait();
uploaded_sem.acquire();
{
AutoLock auto_lock(&upload_list_lock);
if(0 < completed_count){
--completed_count;
}
if(0 == instruct_count && 0 == completed_count){
const std::lock_guard<std::mutex> lock(upload_list_lock);
if(0 == --instruct_count){
// break loop
result = last_result;
is_loop = false;
@ -651,8 +579,8 @@ bool PseudoFdInfo::CancelAllThreads()
{
bool need_cancel = false;
{
AutoLock auto_lock(&upload_list_lock);
if(0 < instruct_count && 0 < completed_count){
const std::lock_guard<std::mutex> lock(upload_list_lock);
if(0 < instruct_count){
S3FS_PRN_INFO("The upload thread is running, so cancel them and wait for the end.");
need_cancel = true;
last_result = -ECANCELED; // to stop thread running
@ -665,7 +593,7 @@ bool PseudoFdInfo::CancelAllThreads()
}
//
// Extract the list for multipart upload from the Unteated Area
// Extract the list for multipart upload from the Untreated Area
//
// The untreated_start parameter must be set aligning it with the boundaries
// of the maximum multipart upload size. This method expects it to be bounded.
@ -683,14 +611,14 @@ bool PseudoFdInfo::CancelAllThreads()
// [NOTE]
// Maximum multipart upload size must be uploading boundary.
//
bool PseudoFdInfo::ExtractUploadPartsFromUntreatedArea(const off_t& untreated_start, const off_t& untreated_size, mp_part_list_t& to_upload_list, filepart_list_t& cancel_upload_list, off_t max_mp_size)
bool PseudoFdInfo::ExtractUploadPartsFromUntreatedArea(off_t untreated_start, off_t untreated_size, mp_part_list_t& to_upload_list, filepart_list_t& cancel_upload_list, off_t max_mp_size)
{
if(untreated_start < 0 || untreated_size <= 0){
S3FS_PRN_ERR("Paramters are wrong(untreated_start=%lld, untreated_size=%lld).", static_cast<long long int>(untreated_start), static_cast<long long int>(untreated_size));
S3FS_PRN_ERR("Parameters are wrong(untreated_start=%lld, untreated_size=%lld).", static_cast<long long int>(untreated_start), static_cast<long long int>(untreated_size));
return false;
}
// Initiliaze lists
// Initialize lists
to_upload_list.clear();
cancel_upload_list.clear();
@ -716,28 +644,32 @@ bool PseudoFdInfo::ExtractUploadPartsFromUntreatedArea(const off_t& untreated_st
// Also, it is assumed that it must not be a copy area.
// So if the areas overlap, include uploaded area as an untreated area.
//
for(filepart_list_t::iterator cur_iter = upload_list.begin(); cur_iter != upload_list.end(); /* ++cur_iter */){
// Check overlap
if((cur_iter->startpos + cur_iter->size - 1) < aligned_start || (aligned_start + aligned_size - 1) < cur_iter->startpos){
// Areas do not overlap
++cur_iter;
{
const std::lock_guard<std::mutex> lock(upload_list_lock);
}else{
// The areas overlap
//
// Since the start position of the uploaded area is aligned with the boundary,
// it is not necessary to check the start position.
// If the uploaded area exceeds the untreated area, expand the untreated area.
//
if((aligned_start + aligned_size - 1) < (cur_iter->startpos + cur_iter->size - 1)){
aligned_size += (cur_iter->startpos + cur_iter->size) - (aligned_start + aligned_size);
for(auto cur_iter = upload_list.begin(); cur_iter != upload_list.end(); /* ++cur_iter */){
// Check overlap
if((cur_iter->startpos + cur_iter->size - 1) < aligned_start || (aligned_start + aligned_size - 1) < cur_iter->startpos){
// Areas do not overlap
++cur_iter;
}else{
// The areas overlap
//
// Since the start position of the uploaded area is aligned with the boundary,
// it is not necessary to check the start position.
// If the uploaded area exceeds the untreated area, expand the untreated area.
//
if((aligned_start + aligned_size - 1) < (cur_iter->startpos + cur_iter->size - 1)){
aligned_size += (cur_iter->startpos + cur_iter->size) - (aligned_start + aligned_size);
}
//
// Add this to cancel list
//
cancel_upload_list.push_back(*cur_iter); // Copy and Push to cancel list
cur_iter = upload_list.erase(cur_iter);
}
//
// Add this to cancel list
//
cancel_upload_list.push_back(*cur_iter); // Copy and Push to cancel list
cur_iter = upload_list.erase(cur_iter);
}
}
@ -773,9 +705,9 @@ bool PseudoFdInfo::ExtractUploadPartsFromUntreatedArea(const off_t& untreated_st
//
bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, mp_part_list_t& to_upload_list, mp_part_list_t& to_copy_list, mp_part_list_t& to_download_list, filepart_list_t& cancel_upload_list, bool& wait_upload_complete, off_t max_mp_size, off_t file_size, bool use_copy)
{
AutoLock auto_lock(&upload_list_lock);
const std::lock_guard<std::mutex> lock(upload_list_lock);
// Initiliaze lists
// Initialize lists
to_upload_list.clear();
to_copy_list.clear();
to_download_list.clear();
@ -787,8 +719,8 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
untreated_list.Duplicate(dup_untreated_list);
// Initialize the iterator of each list first
untreated_list_t::iterator dup_untreated_iter = dup_untreated_list.begin();
filepart_list_t::iterator uploaded_iter = upload_list.begin();
auto dup_untreated_iter = dup_untreated_list.begin();
auto uploaded_iter = upload_list.begin();
//
// Loop to extract areas to upload and download
@ -803,7 +735,7 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
cur_size = ((cur_start + max_mp_size) <= file_size ? max_mp_size : (file_size - cur_start));
//
// Extract the untreated erea that overlaps this current area.
// Extract the untreated area that overlaps this current area.
// (The extracted area is deleted from dup_untreated_list.)
//
untreated_list_t cur_untreated_list;
@ -843,13 +775,13 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
// The untreated area exceeds the end of the current area
//
// Ajust untreated area
// Adjust untreated area
tmp_untreated_size = (cur_start + cur_size) - tmp_untreated_start;
// Add ajusted untreated area to cur_untreated_list
// Add adjusted untreated area to cur_untreated_list
cur_untreated_list.emplace_back(tmp_untreated_start, tmp_untreated_size);
// Remove this ajusted untreated area from the area pointed
// Remove this adjusted untreated area from the area pointed
// to by dup_untreated_iter.
dup_untreated_iter->size = (dup_untreated_iter->start + dup_untreated_iter->size) - (cur_start + cur_size);
dup_untreated_iter->start = tmp_untreated_start + tmp_untreated_size;
@ -871,7 +803,7 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
// It also assumes that each size of uploaded area must be a maximum upload
// size.
//
filepart_list_t::iterator overlap_uploaded_iter = upload_list.end();
auto overlap_uploaded_iter = upload_list.end();
for(; uploaded_iter != upload_list.end(); ++uploaded_iter){
if((cur_start < (uploaded_iter->startpos + uploaded_iter->size)) && (uploaded_iter->startpos < (cur_start + cur_size))){
if(overlap_uploaded_iter != upload_list.end()){
@ -972,7 +904,7 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
off_t changed_start = cur_start;
off_t changed_size = cur_size;
bool first_area = true;
for(untreated_list_t::const_iterator tmp_cur_untreated_iter = cur_untreated_list.begin(); tmp_cur_untreated_iter != cur_untreated_list.end(); ++tmp_cur_untreated_iter, first_area = false){
for(auto tmp_cur_untreated_iter = cur_untreated_list.cbegin(); tmp_cur_untreated_iter != cur_untreated_list.cend(); ++tmp_cur_untreated_iter, first_area = false){
if(tmp_cur_start < tmp_cur_untreated_iter->start){
//
// Detected a gap at the start of area
@ -988,14 +920,14 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
// within 5GB and the remaining area after unification is
// larger than the minimum multipart upload size.
//
mp_part_list_t::reverse_iterator copy_riter = to_copy_list.rbegin();
auto copy_riter = to_copy_list.rbegin();
if( (copy_riter->start + copy_riter->size) == tmp_cur_start &&
(copy_riter->size + (tmp_cur_untreated_iter->start - tmp_cur_start)) <= FIVE_GB &&
((tmp_cur_start + tmp_cur_size) - tmp_cur_untreated_iter->start) >= MIN_MULTIPART_SIZE )
{
//
// Unify to this area to previouse copy area.
// Unify to this area to previous copy area.
//
copy_riter->size += tmp_cur_untreated_iter->start - tmp_cur_start;
S3FS_PRN_DBG("Resize to copy: start=%lld, size=%lld", static_cast<long long int>(copy_riter->start), static_cast<long long int>(copy_riter->size));

View File

@ -22,71 +22,48 @@
#define S3FS_FDCACHE_FDINFO_H_
#include <memory>
#include <mutex>
#include <string>
#include "common.h"
#include "fdcache_entity.h"
#include "psemaphore.h"
#include "metaheader.h"
#include "autolock.h"
#include "types.h"
class FdEntity;
class UntreatedParts;
//------------------------------------------------
// Structure of parameters to pass to thread
//------------------------------------------------
class PseudoFdInfo;
struct pseudofdinfo_thparam
{
PseudoFdInfo* ppseudofdinfo;
std::string path;
std::string upload_id;
int upload_fd;
off_t start;
off_t size;
bool is_copy;
int part_num;
etagpair* petag;
pseudofdinfo_thparam() : ppseudofdinfo(nullptr), path(""), upload_id(""), upload_fd(-1), start(0), size(0), is_copy(false), part_num(-1), petag(nullptr) {}
};
//------------------------------------------------
// Class PseudoFdInfo
//------------------------------------------------
class PseudoFdInfo
{
private:
static int max_threads;
static int opt_max_threads; // for option value
int pseudo_fd;
int physical_fd;
int flags; // flags at open
std::string upload_id;
int upload_fd; // duplicated fd for uploading
filepart_list_t upload_list;
petagpool etag_entities; // list of etag string and part number entities(to maintain the etag entity even if MPPART_INFO is destroyed)
bool is_lock_init;
mutable pthread_mutex_t upload_list_lock; // protects upload_id and upload_list
Semaphore uploaded_sem; // use a semaphore to trigger an upload completion like event flag
int instruct_count; // number of instructions for processing by threads
int completed_count; // number of completed processes by thread
int last_result; // the result of thread processing
mutable std::mutex upload_list_lock; // protects upload_id/fd, upload_list, etc.
std::string upload_id GUARDED_BY(upload_list_lock); //
int upload_fd GUARDED_BY(upload_list_lock); // duplicated fd for uploading
filepart_list_t upload_list GUARDED_BY(upload_list_lock);
petagpool etag_entities GUARDED_BY(upload_list_lock); // list of etag string and part number entities(to maintain the etag entity even if MPPART_INFO is destroyed)
int instruct_count GUARDED_BY(upload_list_lock); // number of instructions for processing by threads
int last_result GUARDED_BY(upload_list_lock); // the result of thread processing
Semaphore uploaded_sem; // use a semaphore to trigger an upload completion like event flag
private:
static void* MultipartUploadThreadWorker(void* arg);
bool Clear();
void CloseUploadFd();
bool OpenUploadFd(AutoLock::Type type = AutoLock::NONE);
bool ResetUploadInfo(AutoLock::Type type);
bool RowInitialUploadInfo(const std::string& id, bool is_cancel_mp, AutoLock::Type type);
bool CompleteInstruction(int result, AutoLock::Type type = AutoLock::NONE);
bool ParallelMultipartUpload(const char* path, const mp_part_list_t& mplist, bool is_copy, AutoLock::Type type = AutoLock::NONE);
bool InsertUploadPart(off_t start, off_t size, int part_num, bool is_copy, etagpair** ppetag, AutoLock::Type type = AutoLock::NONE);
bool OpenUploadFd();
bool ResetUploadInfo() REQUIRES(upload_list_lock);
bool RowInitialUploadInfo(const std::string& id, bool is_cancel_mp);
void IncreaseInstructionCount();
bool GetUploadInfo(std::string& id, int& fd) const;
bool ParallelMultipartUpload(const char* path, const mp_part_list_t& mplist, bool is_copy);
bool InsertUploadPart(off_t start, off_t size, int part_num, bool is_copy, etagpair** ppetag);
bool CancelAllThreads();
bool ExtractUploadPartsFromUntreatedArea(const off_t& untreated_start, const off_t& untreated_size, mp_part_list_t& to_upload_list, filepart_list_t& cancel_upload_list, off_t max_mp_size);
bool ExtractUploadPartsFromUntreatedArea(off_t untreated_start, off_t untreated_size, mp_part_list_t& to_upload_list, filepart_list_t& cancel_upload_list, off_t max_mp_size);
bool IsUploadingHasLock() const REQUIRES(upload_list_lock);
public:
explicit PseudoFdInfo(int fd = -1, int open_flags = 0);
@ -104,18 +81,19 @@ class PseudoFdInfo
bool Set(int fd, int open_flags);
bool ClearUploadInfo(bool is_cancel_mp = false);
bool InitialUploadInfo(const std::string& id){ return RowInitialUploadInfo(id, true, AutoLock::NONE); }
bool InitialUploadInfo(const std::string& id){ return RowInitialUploadInfo(id, true); }
bool IsUploading() const { return !upload_id.empty(); }
bool IsUploading() const;
bool GetUploadId(std::string& id) const;
bool GetEtaglist(etaglist_t& list) const;
bool AppendUploadPart(off_t start, off_t size, bool is_copy = false, etagpair** ppetag = nullptr);
bool ParallelMultipartUploadAll(const char* path, const mp_part_list_t& to_upload_list, const mp_part_list_t& copy_list, int& result);
int PreMultipartUploadRequest(const std::string& strpath, const headers_t& meta);
int WaitAllThreadsExit();
ssize_t UploadBoundaryLastUntreatedArea(const char* path, headers_t& meta, FdEntity* pfdent);
ssize_t UploadBoundaryLastUntreatedArea(const char* path, headers_t& meta, FdEntity* pfdent) REQUIRES(pfdent->GetMutex());
bool ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, mp_part_list_t& to_upload_list, mp_part_list_t& to_copy_list, mp_part_list_t& to_download_list, filepart_list_t& cancel_upload_list, bool& wait_upload_complete, off_t max_mp_size, off_t file_size, bool use_copy);
};
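The GUARDED_BY and REQUIRES markers introduced above are Clang thread-safety annotations. A minimal sketch of what they expand to, assuming the usual definitions (the project presumably provides them in its common headers, which this diff does not show):

#if defined(__clang__)
#define GUARDED_BY(x)   __attribute__((guarded_by(x)))                    // member may only be touched while holding lock x
#define REQUIRES(...)   __attribute__((requires_capability(__VA_ARGS__))) // caller must already hold the listed lock(s)
#else
#define GUARDED_BY(x)
#define REQUIRES(...)
#endif

Built with clang and -Wthread-safety (and a mutex type the analysis recognizes), the compiler can then warn when a member such as upload_list is accessed without holding upload_list_lock.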

View File

@ -23,6 +23,7 @@
#include <memory>
#include <unistd.h>
#include <sstream>
#include <string>
#include <sys/stat.h>
#include "common.h"
@ -40,7 +41,7 @@ static constexpr int CHECK_CACHEFILE_PART_SIZE = 1024 * 16; // Buffer size in
// fdpage_list_t utility
//------------------------------------------------
// Inline function for repeated processing
inline void raw_add_compress_fdpage_list(fdpage_list_t& pagelist, const fdpage& orgpage, bool ignore_load, bool ignore_modify, bool default_load, bool default_modify)
static inline void raw_add_compress_fdpage_list(fdpage_list_t& pagelist, const fdpage& orgpage, bool ignore_load, bool ignore_modify, bool default_load, bool default_modify)
{
if(0 < orgpage.bytes){
// [NOTE]
@ -75,7 +76,7 @@ static void raw_compress_fdpage_list(const fdpage_list_t& pages, fdpage_list_t&
fdpage* lastpage = nullptr;
fdpage_list_t::iterator add_iter;
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){
if(0 == iter->bytes){
continue;
}
@ -139,7 +140,7 @@ static void compress_fdpage_list(const fdpage_list_t& pages, fdpage_list_t& comp
static fdpage_list_t parse_partsize_fdpage_list(const fdpage_list_t& pages, off_t max_partsize)
{
fdpage_list_t parsed_pages;
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){
if(iter->modified){
// modified page
fdpage tmppage = *iter;
@ -233,7 +234,7 @@ bool PageList::GetSparseFilePages(int fd, size_t file_size, fdpage_list_t& spars
//
bool PageList::CheckZeroAreaInFile(int fd, off_t start, size_t bytes)
{
std::unique_ptr<char[]> readbuff(new char[CHECK_CACHEFILE_PART_SIZE]);
auto readbuff = std::make_unique<char[]>(CHECK_CACHEFILE_PART_SIZE);
for(size_t comp_bytes = 0, check_bytes = 0; comp_bytes < bytes; comp_bytes += check_bytes){
if(CHECK_CACHEFILE_PART_SIZE < (bytes - comp_bytes)){
@ -270,7 +271,7 @@ bool PageList::CheckZeroAreaInFile(int fd, off_t start, size_t bytes)
// checkpage: This is one state of the cache file, it is loaded from the stats file.
// sparse_list: This is a list of the results of directly checking the cache file status(HOLE/DATA).
// In the HOLE area, the "loaded" flag of fdpage is false. The DATA area has it set to true.
// fd: opened file discriptor to target cache file.
// fd: opened file descriptor to target cache file.
//
bool PageList::CheckAreaInSparseFile(const struct fdpage& checkpage, const fdpage_list_t& sparse_list, int fd, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list)
{
@ -288,7 +289,7 @@ bool PageList::CheckAreaInSparseFile(const struct fdpage& checkpage, const fdpag
//
bool result = true;
for(fdpage_list_t::const_iterator iter = sparse_list.begin(); iter != sparse_list.end(); ++iter){
for(auto iter = sparse_list.cbegin(); iter != sparse_list.cend(); ++iter){
off_t check_start = 0;
off_t check_bytes = 0;
if((iter->offset + iter->bytes) <= checkpage.offset){
@ -353,7 +354,7 @@ void PageList::FreeList(fdpage_list_t& list)
list.clear();
}
PageList::PageList(off_t size, bool is_loaded, bool is_modified, bool shrinked) : is_shrink(shrinked)
PageList::PageList(off_t size, bool is_loaded, bool is_modified, bool shrunk) : is_shrink(shrunk)
{
Init(size, is_loaded, is_modified);
}
@ -384,14 +385,14 @@ off_t PageList::Size() const
if(pages.empty()){
return 0;
}
fdpage_list_t::const_reverse_iterator riter = pages.rbegin();
auto riter = pages.rbegin();
return riter->next();
}
bool PageList::Compress()
{
fdpage* lastpage = nullptr;
for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ){
for(auto iter = pages.begin(); iter != pages.end(); ){
if(!lastpage){
// First item
lastpage = &(*iter);
@ -427,7 +428,7 @@ bool PageList::Compress()
bool PageList::Parse(off_t new_pos)
{
for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
for(auto iter = pages.begin(); iter != pages.end(); ++iter){
if(new_pos == iter->offset){
// nothing to do
return true;
@ -462,7 +463,7 @@ bool PageList::Resize(off_t size, bool is_loaded, bool is_modified)
}else if(size < total){
// cut area
for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ){
for(auto iter = pages.begin(); iter != pages.end(); ){
if(iter->next() <= size){
++iter;
}else{
@ -485,7 +486,7 @@ bool PageList::Resize(off_t size, bool is_loaded, bool is_modified)
bool PageList::IsPageLoaded(off_t start, off_t size) const
{
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){
if(iter->end() < start){
continue;
}
@ -525,7 +526,7 @@ bool PageList::SetPageLoadedStatus(off_t start, off_t size, PageList::page_statu
Parse(start + size);
// set loaded flag
for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
for(auto iter = pages.begin(); iter != pages.end(); ++iter){
if(iter->end() < start){
continue;
}else if(start + size <= iter->offset){
@ -542,7 +543,7 @@ bool PageList::SetPageLoadedStatus(off_t start, off_t size, PageList::page_statu
bool PageList::FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const
{
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){
if(start <= iter->end()){
if(!iter->loaded && !iter->modified){ // Do not load unloaded and modified areas
resstart = iter->offset;
@ -568,7 +569,7 @@ off_t PageList::GetTotalUnloadedPageSize(off_t start, off_t size, off_t limit_si
}
off_t next = start + size;
off_t restsize = 0;
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){
if(iter->next() <= start){
continue;
}
@ -609,7 +610,7 @@ size_t PageList::GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start, off
}
off_t next = start + size;
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){
if(iter->next() <= start){
continue;
}
@ -626,7 +627,7 @@ size_t PageList::GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start, off
off_t page_size = page_next - page_start;
// add list
fdpage_list_t::reverse_iterator riter = unloaded_list.rbegin();
auto riter = unloaded_list.rbegin();
if(riter != unloaded_list.rend() && riter->next() == page_start){
// merge to before page
riter->bytes += page_size;
@ -657,7 +658,7 @@ bool PageList::GetPageListsForMultipartUpload(fdpage_list_t& dlpages, fdpage_lis
compress_fdpage_list_ignore_load(pages, modified_pages, false);
fdpage prev_page;
for(fdpage_list_t::const_iterator iter = modified_pages.begin(); iter != modified_pages.end(); ++iter){
for(auto iter = modified_pages.cbegin(); iter != modified_pages.cend(); ++iter){
if(iter->modified){
// current is modified area
if(!prev_page.modified){
@ -754,7 +755,7 @@ bool PageList::GetNoDataPageLists(fdpage_list_t& nodata_pages, off_t start, size
// extract areas without data
fdpage_list_t tmp_pagelist;
off_t stop_pos = (0L == size ? -1 : (start + size));
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){
if((iter->offset + iter->bytes) < start){
continue;
}
@ -786,7 +787,7 @@ bool PageList::GetNoDataPageLists(fdpage_list_t& nodata_pages, off_t start, size
off_t PageList::BytesModified() const
{
off_t total = 0;
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){
if(iter->modified){
total += iter->bytes;
}
@ -799,7 +800,7 @@ bool PageList::IsModified() const
if(is_shrink){
return true;
}
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){
if(iter->modified){
return true;
}
@ -811,7 +812,7 @@ bool PageList::ClearAllModified()
{
is_shrink = false;
for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
for(auto iter = pages.begin(); iter != pages.end(); ++iter){
if(iter->modified){
iter->modified = false;
}
@ -819,155 +820,151 @@ bool PageList::ClearAllModified()
return Compress();
}
bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
bool PageList::Serialize(const CacheFileStat& file, ino_t inode) const
{
// make contents
std::ostringstream ssall;
ssall << inode << ":" << Size();
for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){
ssall << "\n" << iter->offset << ":" << iter->bytes << ":" << (iter->loaded ? "1" : "0") << ":" << (iter->modified ? "1" : "0");
}
std::string strall = ssall.str();
// over write
if(!file.OverWriteFile(strall)){
return false;
}
return true;
}
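// [NOTE] (editor's illustration, not in the original source)
// With the loop above, a serialized cache-stat file looks like:
//
//     123456:10485760
//     0:4194304:1:0
//     4194304:6291456:1:1
//
// i.e. "<inode>:<total size>" on the first line, then one
// "<offset>:<bytes>:<loaded>:<modified>" entry per page.
// Deserialize() below also accepts the old head format that
// carries only "<size>".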
bool PageList::Deserialize(CacheFileStat& file, ino_t inode)
{
if(!file.Open()){
return false;
}
if(is_output){
//
// put to file
//
std::ostringstream ssall;
ssall << inode << ":" << Size();
for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
ssall << "\n" << iter->offset << ":" << iter->bytes << ":" << (iter->loaded ? "1" : "0") << ":" << (iter->modified ? "1" : "0");
}
//
// loading from file
//
struct stat st{};
if(-1 == fstat(file.GetFd(), &st)){
S3FS_PRN_ERR("fstat is failed. errno(%d)", errno);
return false;
}
if(0 >= st.st_size){
// nothing
Init(0, false, false);
return true;
}
auto ptmp = std::make_unique<char[]>(st.st_size + 1);
ssize_t result;
// read from file
if(0 >= (result = pread(file.GetFd(), ptmp.get(), st.st_size, 0))){
S3FS_PRN_ERR("failed to read stats(%d)", errno);
return false;
}
ptmp[result] = '\0';
std::string oneline;
std::istringstream ssall(ptmp.get());
if(-1 == ftruncate(file.GetFd(), 0)){
S3FS_PRN_ERR("failed to truncate file(to 0) for stats(%d)", errno);
return false;
}
std::string strall = ssall.str();
if(0 >= pwrite(file.GetFd(), strall.c_str(), strall.length(), 0)){
S3FS_PRN_ERR("failed to write stats(%d)", errno);
return false;
}
// loaded
Clear();
// load head line(for size and inode)
off_t total;
ino_t cache_inode; // if this value is 0, it means old format.
if(!getline(ssall, oneline, '\n')){
S3FS_PRN_ERR("failed to parse stats.");
return false;
}else{
//
// loading from file
//
struct stat st;
memset(&st, 0, sizeof(struct stat));
if(-1 == fstat(file.GetFd(), &st)){
S3FS_PRN_ERR("fstat is failed. errno(%d)", errno);
return false;
}
if(0 >= st.st_size){
// nothing
Init(0, false, false);
return true;
}
std::unique_ptr<char[]> ptmp(new char[st.st_size + 1]);
ssize_t result;
// read from file
if(0 >= (result = pread(file.GetFd(), ptmp.get(), st.st_size, 0))){
S3FS_PRN_ERR("failed to read stats(%d)", errno);
return false;
}
ptmp[result] = '\0';
std::string oneline;
std::istringstream ssall(ptmp.get());
// loaded
Clear();
// load head line(for size and inode)
off_t total;
ino_t cache_inode; // if this value is 0, it means old format.
if(!getline(ssall, oneline, '\n')){
std::istringstream sshead(oneline);
std::string strhead1;
std::string strhead2;
// get first part in head line.
if(!getline(sshead, strhead1, ':')){
S3FS_PRN_ERR("failed to parse stats.");
return false;
}
// get second part in head line.
if(!getline(sshead, strhead2, ':')){
// old head format is "<size>\n"
total = cvt_strtoofft(strhead1.c_str(), /* base= */10);
cache_inode = 0;
}else{
std::istringstream sshead(oneline);
std::string strhead1;
std::string strhead2;
// get first part in head line.
if(!getline(sshead, strhead1, ':')){
S3FS_PRN_ERR("failed to parse stats.");
// current head format is "<inode>:<size>\n"
total = cvt_strtoofft(strhead2.c_str(), /* base= */10);
cache_inode = static_cast<ino_t>(cvt_strtoofft(strhead1.c_str(), /* base= */10));
if(0 == cache_inode){
S3FS_PRN_ERR("wrong inode number in parsed cache stats.");
return false;
}
// get second part in head line.
if(!getline(sshead, strhead2, ':')){
// old head format is "<size>\n"
total = cvt_strtoofft(strhead1.c_str(), /* base= */10);
cache_inode = 0;
}else{
// current head format is "<inode>:<size>\n"
total = cvt_strtoofft(strhead2.c_str(), /* base= */10);
cache_inode = static_cast<ino_t>(cvt_strtoofft(strhead1.c_str(), /* base= */10));
if(0 == cache_inode){
S3FS_PRN_ERR("wrong inode number in parsed cache stats.");
return false;
}
}
}
// check inode number
if(0 != cache_inode && cache_inode != inode){
S3FS_PRN_ERR("differ inode and inode number in parsed cache stats.");
return false;
}
// load each part
bool is_err = false;
while(getline(ssall, oneline, '\n')){
std::string part;
std::istringstream ssparts(oneline);
// offset
if(!getline(ssparts, part, ':')){
is_err = true;
break;
}
off_t offset = cvt_strtoofft(part.c_str(), /* base= */10);
// size
if(!getline(ssparts, part, ':')){
is_err = true;
break;
}
off_t size = cvt_strtoofft(part.c_str(), /* base= */10);
// loaded
if(!getline(ssparts, part, ':')){
is_err = true;
break;
}
bool is_loaded = (1 == cvt_strtoofft(part.c_str(), /* base= */10) ? true : false);
bool is_modified;
if(!getline(ssparts, part, ':')){
is_modified = false; // old version does not have this part.
}else{
is_modified = (1 == cvt_strtoofft(part.c_str(), /* base= */10) ? true : false);
}
// add new area
PageList::page_status pstatus = PageList::page_status::NOT_LOAD_MODIFIED;
if(is_loaded){
if(is_modified){
pstatus = PageList::page_status::LOAD_MODIFIED;
}else{
pstatus = PageList::page_status::LOADED;
}
}else{
if(is_modified){
pstatus = PageList::page_status::MODIFIED;
}
}
SetPageLoadedStatus(offset, size, pstatus);
}
if(is_err){
S3FS_PRN_ERR("failed to parse stats.");
Clear();
return false;
}
// check size
if(total != Size()){
S3FS_PRN_ERR("different size(%lld - %lld).", static_cast<long long int>(total), static_cast<long long int>(Size()));
Clear();
return false;
}
}
// check inode number
if(0 != cache_inode && cache_inode != inode){
S3FS_PRN_ERR("differ inode and inode number in parsed cache stats.");
return false;
}
// load each part
bool is_err = false;
while(getline(ssall, oneline, '\n')){
std::string part;
std::istringstream ssparts(oneline);
// offset
if(!getline(ssparts, part, ':')){
is_err = true;
break;
}
off_t offset = cvt_strtoofft(part.c_str(), /* base= */10);
// size
if(!getline(ssparts, part, ':')){
is_err = true;
break;
}
off_t size = cvt_strtoofft(part.c_str(), /* base= */10);
// loaded
if(!getline(ssparts, part, ':')){
is_err = true;
break;
}
bool is_loaded = (1 == cvt_strtoofft(part.c_str(), /* base= */10) ? true : false);
bool is_modified;
if(!getline(ssparts, part, ':')){
is_modified = false; // old version does not have this part.
}else{
is_modified = (1 == cvt_strtoofft(part.c_str(), /* base= */10) ? true : false);
}
// add new area
PageList::page_status pstatus = PageList::page_status::NOT_LOAD_MODIFIED;
if(is_loaded){
if(is_modified){
pstatus = PageList::page_status::LOAD_MODIFIED;
}else{
pstatus = PageList::page_status::LOADED;
}
}else{
if(is_modified){
pstatus = PageList::page_status::MODIFIED;
}
}
SetPageLoadedStatus(offset, size, pstatus);
}
if(is_err){
S3FS_PRN_ERR("failed to parse stats.");
Clear();
return false;
}
// check size
if(total != Size()){
S3FS_PRN_ERR("different size(%lld - %lld).", static_cast<long long int>(total), static_cast<long long int>(Size()));
Clear();
return false;
}
return true;
}
@ -975,8 +972,8 @@ void PageList::Dump() const
{
int cnt = 0;
S3FS_PRN_DBG("pages (shrinked=%s) = {", (is_shrink ? "yes" : "no"));
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter, ++cnt){
S3FS_PRN_DBG("pages (shrunk=%s) = {", (is_shrink ? "yes" : "no"));
for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter, ++cnt){
S3FS_PRN_DBG(" [%08d] -> {%014lld - %014lld : %s / %s}", cnt, static_cast<long long int>(iter->offset), static_cast<long long int>(iter->bytes), iter->loaded ? "loaded" : "unloaded", iter->modified ? "modified" : "not modified");
}
S3FS_PRN_DBG("}");
@ -992,7 +989,7 @@ void PageList::Dump() const
// If it is a bad area in the previous case, it will be reported as an error.
// If the latter case does not match, it will be reported as a warning.
//
bool PageList::CompareSparseFile(int fd, size_t file_size, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list)
bool PageList::CompareSparseFile(int fd, size_t file_size, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list) const
{
err_area_list.clear();
warn_area_list.clear();
@ -1017,7 +1014,7 @@ bool PageList::CompareSparseFile(int fd, size_t file_size, fdpage_list_t& err_ar
// Compare each pages and sparse_list
bool result = true;
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
for(auto iter = pages.cbegin(); iter != pages.cend(); ++iter){
if(!PageList::CheckAreaInSparseFile(*iter, sparse_list, fd, err_area_list, warn_area_list)){
result = false;
}

View File

@ -21,6 +21,7 @@
#ifndef S3FS_FDCACHE_PAGE_H_
#define S3FS_FDCACHE_PAGE_H_
#include <cstdint>
#include <sys/types.h>
#include <vector>
@ -76,10 +77,10 @@ class PageList
private:
fdpage_list_t pages;
bool is_shrink; // [NOTE] true if it has been shrinked even once
bool is_shrink; // [NOTE] true if it has been shrunk even once
public:
enum class page_status{
enum class page_status : int8_t {
NOT_LOAD_MODIFIED = 0,
LOADED,
MODIFIED,
@ -93,13 +94,16 @@ class PageList
void Clear();
bool Parse(off_t new_pos);
bool Serialize(const CacheFileStat& file, ino_t inode) const;
public:
static void FreeList(fdpage_list_t& list);
explicit PageList(off_t size = 0, bool is_loaded = false, bool is_modified = false, bool shrinked = false);
explicit PageList(off_t size = 0, bool is_loaded = false, bool is_modified = false, bool shrunk = false);
PageList(const PageList&) = delete;
PageList(PageList&&) = delete;
PageList& operator=(const PageList&) = delete;
PageList& operator=(PageList&&) = delete;
~PageList();
bool Init(off_t size, bool is_loaded, bool is_modified);
@ -119,9 +123,9 @@ class PageList
bool ClearAllModified();
bool Compress();
bool Serialize(CacheFileStat& file, bool is_output, ino_t inode);
bool Deserialize(CacheFileStat& file, ino_t inode);
void Dump() const;
bool CompareSparseFile(int fd, size_t file_size, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list);
bool CompareSparseFile(int fd, size_t file_size, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list) const;
};
#endif // S3FS_FDCACHE_PAGE_H_

View File

@ -20,11 +20,10 @@
#include <algorithm>
#include <cstdlib>
#include <mutex>
#include <vector>
#include "s3fs_logger.h"
#include "fdcache_pseudofd.h"
#include "autolock.h"
//------------------------------------------------
// Symbols
@ -57,39 +56,12 @@ bool PseudoFdManager::Release(int fd)
//------------------------------------------------
// PseudoFdManager methods
//------------------------------------------------
PseudoFdManager::PseudoFdManager() : is_lock_init(false)
{
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
#if S3FS_PTHREAD_ERRORCHECK
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
int result;
if(0 != (result = pthread_mutex_init(&pseudofd_list_lock, &attr))){
S3FS_PRN_CRIT("failed to init pseudofd_list_lock: %d", result);
abort();
}
is_lock_init = true;
}
PseudoFdManager::~PseudoFdManager()
{
if(is_lock_init){
int result;
if(0 != (result = pthread_mutex_destroy(&pseudofd_list_lock))){
S3FS_PRN_CRIT("failed to destroy pseudofd_list_lock: %d", result);
abort();
}
is_lock_init = false;
}
}
int PseudoFdManager::GetUnusedMinPseudoFd() const
{
int min_fd = MIN_PSEUDOFD_NUMBER;
// Look for the first discontinuous value.
for(pseudofd_list_t::const_iterator iter = pseudofd_list.begin(); iter != pseudofd_list.end(); ++iter){
for(auto iter = pseudofd_list.cbegin(); iter != pseudofd_list.cend(); ++iter){
if(min_fd == (*iter)){
++min_fd;
}else if(min_fd < (*iter)){
@ -101,7 +73,7 @@ int PseudoFdManager::GetUnusedMinPseudoFd() const
int PseudoFdManager::CreatePseudoFd()
{
AutoLock auto_lock(&pseudofd_list_lock);
const std::lock_guard<std::mutex> lock(pseudofd_list_lock);
int new_fd = PseudoFdManager::GetUnusedMinPseudoFd();
pseudofd_list.push_back(new_fd);
@ -112,9 +84,9 @@ int PseudoFdManager::CreatePseudoFd()
bool PseudoFdManager::ReleasePseudoFd(int fd)
{
AutoLock auto_lock(&pseudofd_list_lock);
const std::lock_guard<std::mutex> lock(pseudofd_list_lock);
for(pseudofd_list_t::iterator iter = pseudofd_list.begin(); iter != pseudofd_list.end(); ++iter){
for(auto iter = pseudofd_list.begin(); iter != pseudofd_list.end(); ++iter){
if(fd == (*iter)){
pseudofd_list.erase(iter);
return true;

View File

@ -21,8 +21,11 @@
#ifndef S3FS_FDCACHE_PSEUDOFD_H_
#define S3FS_FDCACHE_PSEUDOFD_H_
#include <mutex>
#include <vector>
#include "common.h"
//------------------------------------------------
// Typedefs
//------------------------------------------------
@ -36,25 +39,24 @@ typedef std::vector<int> pseudofd_list_t;
class PseudoFdManager
{
private:
pseudofd_list_t pseudofd_list;
bool is_lock_init;
pthread_mutex_t pseudofd_list_lock; // protects pseudofd_list
pseudofd_list_t pseudofd_list GUARDED_BY(pseudofd_list_lock);
std::mutex pseudofd_list_lock; // protects pseudofd_list
private:
static PseudoFdManager& GetManager();
PseudoFdManager();
~PseudoFdManager();
PseudoFdManager() = default;
~PseudoFdManager() = default;
int GetUnusedMinPseudoFd() const REQUIRES(pseudofd_list_lock);
int CreatePseudoFd();
bool ReleasePseudoFd(int fd);
public:
PseudoFdManager(const PseudoFdManager&) = delete;
PseudoFdManager(PseudoFdManager&&) = delete;
PseudoFdManager& operator=(const PseudoFdManager&) = delete;
PseudoFdManager& operator=(PseudoFdManager&&) = delete;
int GetUnusedMinPseudoFd() const;
int CreatePseudoFd();
bool ReleasePseudoFd(int fd);
public:
static int Get();
static bool Release(int fd);
};

View File

@ -20,6 +20,7 @@
#include <cerrno>
#include <unistd.h>
#include <string>
#include <sys/file.h>
#include <sys/stat.h>
@ -192,6 +193,49 @@ bool CacheFileStat::SetPath(const char* tpath, bool is_open)
return Open();
}
// [NOTE]
// There is no need to check whether the file is open, because the contents are replaced atomically via rename().
//
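// (editor's note) The flow below writes the new contents to a mkstemp()
// temporary created in the same cache directory, then rename()s it over
// the stat file, so a concurrent reader never observes a partially
// written file: it sees either the old contents or the new ones.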
bool CacheFileStat::OverWriteFile(const std::string& strall) const
{
// make temporary file path(in same cache directory)
std::string sfile_path;
if(0 != CacheFileStat::MakeCacheFileStatPath(path.c_str(), sfile_path, true)){
S3FS_PRN_ERR("failed to create cache stat file path(%s)", path.c_str());
return false;
}
std::string strTmpFile = mydirname(sfile_path) + "/.tmpstat.XXXXXX";
strTmpFile.push_back('\0'); // terminate with a null character and allocate space for it.
// open temporary file(mode: 0600)
//
// [TODO]
// Currently, use "&str[pos]" to make it possible to build with C++14.
// Once we support C++17 or later, we will use "str.data()".
//
int tmpfd;
if(-1 == (tmpfd = mkstemp(&strTmpFile[0]))){ // NOLINT(readability-container-data-pointer)
S3FS_PRN_ERR("failed to create temporary cache stat file path(%s) for %s cache", strTmpFile.c_str(), sfile_path.c_str());
return false;
}
// write contents
if(0 >= pwrite(tmpfd, strall.c_str(), strall.length(), 0)){
S3FS_PRN_ERR("failed to write stats to temporary file(%d)", errno);
close(tmpfd);
return false;
}
close(tmpfd);
// rename
if(0 != rename(strTmpFile.c_str(), sfile_path.c_str())){
S3FS_PRN_ERR("failed to rename temporary cache stat file path(%s) to %s cache", strTmpFile.c_str(), sfile_path.c_str());
unlink(strTmpFile.c_str());
return false;
}
return true;
}
bool CacheFileStat::RawOpen(bool readonly)
{
if(path.empty()){

View File

@ -46,12 +46,17 @@ class CacheFileStat
explicit CacheFileStat(const char* tpath = nullptr);
~CacheFileStat();
CacheFileStat(const CacheFileStat&) = delete;
CacheFileStat(CacheFileStat&&) = delete;
CacheFileStat& operator=(const CacheFileStat&) = delete;
CacheFileStat& operator=(CacheFileStat&&) = delete;
bool Open();
bool ReadOnlyOpen();
bool Release();
bool SetPath(const char* tpath, bool is_open = true);
int GetFd() const { return fd; }
bool OverWriteFile(const std::string& strall) const;
};
#endif // S3FS_FDCACHE_STAT_H_

View File

@ -19,64 +19,36 @@
*/
#include <cstdlib>
#include <mutex>
#include "s3fs_logger.h"
#include "fdcache_untreated.h"
#include "autolock.h"
//------------------------------------------------
// UntreatedParts methods
//------------------------------------------------
UntreatedParts::UntreatedParts() : last_tag(0) //, is_lock_init(false)
bool UntreatedParts::empty() const
{
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
#if S3FS_PTHREAD_ERRORCHECK
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
int result;
if(0 != (result = pthread_mutex_init(&untreated_list_lock, &attr))){
S3FS_PRN_CRIT("failed to init untreated_list_lock: %d", result);
abort();
}
is_lock_init = true;
}
UntreatedParts::~UntreatedParts()
{
if(is_lock_init){
int result;
if(0 != (result = pthread_mutex_destroy(&untreated_list_lock))){
S3FS_PRN_CRIT("failed to destroy untreated_list_lock: %d", result);
abort();
}
is_lock_init = false;
}
}
bool UntreatedParts::empty()
{
AutoLock auto_lock(&untreated_list_lock);
const std::lock_guard<std::mutex> lock(untreated_list_lock);
return untreated_list.empty();
}
bool UntreatedParts::AddPart(off_t start, off_t size)
{
if(start < 0 || size <= 0){
S3FS_PRN_ERR("Paramter are wrong(start=%lld, size=%lld).", static_cast<long long int>(start), static_cast<long long int>(size));
S3FS_PRN_ERR("Parameter are wrong(start=%lld, size=%lld).", static_cast<long long int>(start), static_cast<long long int>(size));
return false;
}
AutoLock auto_lock(&untreated_list_lock);
const std::lock_guard<std::mutex> lock(untreated_list_lock);
++last_tag;
// Check the overlap with the existing part and add the part.
for(untreated_list_t::iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
for(auto iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
if(iter->stretch(start, size, last_tag)){
// the part was stretched, thus check if it overlaps with next parts
untreated_list_t::iterator niter = iter;
for(++niter; niter != untreated_list.end(); ){
auto niter = iter;
for(++niter; niter != untreated_list.cend(); ){
if(!iter->stretch(niter->start, niter->size, last_tag)){
// This next part does not overlap with the current part
break;
@ -102,13 +74,13 @@ bool UntreatedParts::AddPart(off_t start, off_t size)
bool UntreatedParts::RowGetPart(off_t& start, off_t& size, off_t max_size, off_t min_size, bool lastpart) const
{
if(max_size <= 0 || min_size < 0 || max_size < min_size){
S3FS_PRN_ERR("Paramter are wrong(max_size=%lld, min_size=%lld).", static_cast<long long int>(max_size), static_cast<long long int>(min_size));
S3FS_PRN_ERR("Parameter are wrong(max_size=%lld, min_size=%lld).", static_cast<long long int>(max_size), static_cast<long long int>(min_size));
return false;
}
AutoLock auto_lock(&untreated_list_lock);
const std::lock_guard<std::mutex> lock(untreated_list_lock);
// Check the overlap with the existing part and add the part.
for(untreated_list_t::const_iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
for(auto iter = untreated_list.cbegin(); iter != untreated_list.cend(); ++iter){
if(!lastpart || iter->untreated_tag == last_tag){
if(min_size <= iter->size){
if(iter->size <= max_size){
@ -137,17 +109,17 @@ bool UntreatedParts::RowGetPart(off_t& start, off_t& size, off_t max_size, off_t
bool UntreatedParts::ClearParts(off_t start, off_t size)
{
if(start < 0 || size < 0){
S3FS_PRN_ERR("Paramter are wrong(start=%lld, size=%lld).", static_cast<long long int>(start), static_cast<long long int>(size));
S3FS_PRN_ERR("Parameter are wrong(start=%lld, size=%lld).", static_cast<long long int>(start), static_cast<long long int>(size));
return false;
}
AutoLock auto_lock(&untreated_list_lock);
const std::lock_guard<std::mutex> lock(untreated_list_lock);
if(untreated_list.empty()){
return true;
}
// Check the overlap with the existing part.
for(untreated_list_t::iterator iter = untreated_list.begin(); iter != untreated_list.end(); ){
for(auto iter = untreated_list.begin(); iter != untreated_list.end(); ){
if(0 != size && (start + size) <= iter->start){
// clear area is in front of iter area, no more to do.
break;
@ -193,9 +165,9 @@ bool UntreatedParts::ClearParts(off_t start, off_t size)
//
bool UntreatedParts::GetLastUpdatePart(off_t& start, off_t& size) const
{
AutoLock auto_lock(&untreated_list_lock);
const std::lock_guard<std::mutex> lock(untreated_list_lock);
for(untreated_list_t::const_iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
for(auto iter = untreated_list.cbegin(); iter != untreated_list.cend(); ++iter){
if(iter->untreated_tag == last_tag){
start = iter->start;
size = iter->size;
@ -213,15 +185,15 @@ bool UntreatedParts::GetLastUpdatePart(off_t& start, off_t& size) const
//
bool UntreatedParts::ReplaceLastUpdatePart(off_t start, off_t size)
{
AutoLock auto_lock(&untreated_list_lock);
const std::lock_guard<std::mutex> lock(untreated_list_lock);
for(untreated_list_t::iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
for(auto iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
if(iter->untreated_tag == last_tag){
if(0 < size){
iter->start = start;
iter->size = size;
}else{
iter = untreated_list.erase(iter);
untreated_list.erase(iter);
}
return true;
}
@ -234,9 +206,9 @@ bool UntreatedParts::ReplaceLastUpdatePart(off_t start, off_t size)
//
bool UntreatedParts::RemoveLastUpdatePart()
{
AutoLock auto_lock(&untreated_list_lock);
const std::lock_guard<std::mutex> lock(untreated_list_lock);
for(untreated_list_t::iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
for(auto iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
if(iter->untreated_tag == last_tag){
untreated_list.erase(iter);
return true;
@ -250,7 +222,7 @@ bool UntreatedParts::RemoveLastUpdatePart()
//
bool UntreatedParts::Duplicate(untreated_list_t& list)
{
AutoLock auto_lock(&untreated_list_lock);
const std::lock_guard<std::mutex> lock(untreated_list_lock);
list = untreated_list;
return true;
@ -258,10 +230,10 @@ bool UntreatedParts::Duplicate(untreated_list_t& list)
void UntreatedParts::Dump()
{
AutoLock auto_lock(&untreated_list_lock);
const std::lock_guard<std::mutex> lock(untreated_list_lock);
S3FS_PRN_DBG("untreated list = [");
for(untreated_list_t::const_iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
for(auto iter = untreated_list.cbegin(); iter != untreated_list.cend(); ++iter){
S3FS_PRN_DBG(" {%014lld - %014lld : tag=%ld}", static_cast<long long int>(iter->start), static_cast<long long int>(iter->size), iter->untreated_tag);
}
S3FS_PRN_DBG("]");

View File

@ -21,6 +21,8 @@
#ifndef S3FS_FDCACHE_UNTREATED_H_
#define S3FS_FDCACHE_UNTREATED_H_
#include <mutex>
#include "common.h"
#include "types.h"
@ -30,24 +32,23 @@
class UntreatedParts
{
private:
mutable pthread_mutex_t untreated_list_lock; // protects untreated_list
bool is_lock_init;
mutable std::mutex untreated_list_lock; // protects untreated_list
untreated_list_t untreated_list;
long last_tag; // [NOTE] Use this to identify the latest updated part.
untreated_list_t untreated_list GUARDED_BY(untreated_list_lock);
long last_tag GUARDED_BY(untreated_list_lock) = 0; // [NOTE] Use this to identify the latest updated part.
private:
bool RowGetPart(off_t& start, off_t& size, off_t max_size, off_t min_size, bool lastpart) const;
public:
UntreatedParts();
~UntreatedParts();
UntreatedParts() = default;
~UntreatedParts() = default;
UntreatedParts(const UntreatedParts&) = delete;
UntreatedParts(UntreatedParts&&) = delete;
UntreatedParts& operator=(const UntreatedParts&) = delete;
UntreatedParts& operator=(UntreatedParts&&) = delete;
bool empty();
bool empty() const;
bool AddPart(off_t start, off_t size);
bool GetLastUpdatedPart(off_t& start, off_t& size, off_t max_size, off_t min_size = MIN_MULTIPART_SIZE) const { return RowGetPart(start, size, max_size, min_size, true); }

src/filetimes.cpp (new file, 303 lines)
View File

@ -0,0 +1,303 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "filetimes.h"
#include "s3fs_logger.h"
#include "string_util.h"
//-------------------------------------------------------------------
// Utility functions
//-------------------------------------------------------------------
//
// Returns true only when the timespec holds an explicit time, i.e.
// tv_sec is not negative and tv_nsec is neither UTIME_OMIT nor UTIME_NOW.
//
bool valid_timespec(const struct timespec& ts)
{
if(0 > ts.tv_sec || UTIME_OMIT == ts.tv_nsec || UTIME_NOW == ts.tv_nsec){
return false;
}
return true;
}
//
// result: -1 ts1 < ts2
// 0 ts1 == ts2
// 1 ts1 > ts2
//
constexpr int compare_timespec(const struct timespec& ts1, const struct timespec& ts2)
{
if(ts1.tv_sec < ts2.tv_sec){
return -1;
}else if(ts1.tv_sec > ts2.tv_sec){
return 1;
}else{
if(ts1.tv_nsec < ts2.tv_nsec){
return -1;
}else if(ts1.tv_nsec > ts2.tv_nsec){
return 1;
}
}
return 0;
}
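// e.g. (editor's illustration) compare_timespec({5, 0}, {5, 100}) returns -1:
// equal tv_sec values fall through to the tv_nsec comparison above.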
//
// result: -1 st < ts
// 0 st == ts
// 1 st > ts
//
int compare_timespec(const struct stat& st, stat_time_type type, const struct timespec& ts)
{
struct timespec st_ts;
set_stat_to_timespec(st, type, st_ts);
return compare_timespec(st_ts, ts);
}
void set_timespec_to_stat(struct stat& st, stat_time_type type, const struct timespec& ts)
{
if(stat_time_type::ATIME == type){
#ifdef __APPLE__
st.st_atime = ts.tv_sec;
st.st_atimespec.tv_nsec = ts.tv_nsec;
#else
st.st_atim.tv_sec = ts.tv_sec;
st.st_atim.tv_nsec = ts.tv_nsec;
#endif
}else if(stat_time_type::MTIME == type){
#ifdef __APPLE__
st.st_mtime = ts.tv_sec;
st.st_mtimespec.tv_nsec = ts.tv_nsec;
#else
st.st_mtim.tv_sec = ts.tv_sec;
st.st_mtim.tv_nsec = ts.tv_nsec;
#endif
}else if(stat_time_type::CTIME == type){
#ifdef __APPLE__
st.st_ctime = ts.tv_sec;
st.st_ctimespec.tv_nsec = ts.tv_nsec;
#else
st.st_ctim.tv_sec = ts.tv_sec;
st.st_ctim.tv_nsec = ts.tv_nsec;
#endif
}else{
S3FS_PRN_ERR("unknown type(%d), so skip to set value.", static_cast<int>(type));
}
}
struct timespec* set_stat_to_timespec(const struct stat& st, stat_time_type type, struct timespec& ts)
{
if(stat_time_type::ATIME == type){
#ifdef __APPLE__
ts.tv_sec = st.st_atime;
ts.tv_nsec = st.st_atimespec.tv_nsec;
#else
ts = st.st_atim;
#endif
}else if(stat_time_type::MTIME == type){
#ifdef __APPLE__
ts.tv_sec = st.st_mtime;
ts.tv_nsec = st.st_mtimespec.tv_nsec;
#else
ts = st.st_mtim;
#endif
}else if(stat_time_type::CTIME == type){
#ifdef __APPLE__
ts.tv_sec = st.st_ctime;
ts.tv_nsec = st.st_ctimespec.tv_nsec;
#else
ts = st.st_ctim;
#endif
}else{
S3FS_PRN_ERR("unknown type(%d), so use 0 as timespec.", static_cast<int>(type));
ts.tv_sec = 0;
ts.tv_nsec = 0;
}
return &ts;
}
std::string str_stat_time(const struct stat& st, stat_time_type type)
{
struct timespec ts;
return str(*set_stat_to_timespec(st, type, ts));
}
struct timespec* s3fs_realtime(struct timespec& ts)
{
if(-1 == clock_gettime(static_cast<clockid_t>(CLOCK_REALTIME), &ts)){
S3FS_PRN_WARN("failed to clock_gettime by errno(%d)", errno);
ts.tv_sec = time(nullptr);
ts.tv_nsec = 0;
}
return &ts;
}
std::string s3fs_str_realtime()
{
struct timespec ts;
return str(*s3fs_realtime(ts));
}
//-------------------------------------------------------------------
// FileTimes Class
//-------------------------------------------------------------------
void FileTimes::Clear()
{
ClearCTime();
ClearATime();
ClearMTime();
}
void FileTimes::Clear(stat_time_type type)
{
if(stat_time_type::CTIME == type){
ft_ctime = {0, UTIME_OMIT};
}else if(stat_time_type::ATIME == type){
ft_atime = {0, UTIME_OMIT};
}else{ // stat_time_type::MTIME
ft_mtime = {0, UTIME_OMIT};
}
}
const struct timespec& FileTimes::GetTime(stat_time_type type) const
{
if(stat_time_type::CTIME == type){
return ft_ctime;
}else if(stat_time_type::ATIME == type){
return ft_atime;
}else{ // stat_time_type::MTIME
return ft_mtime;
}
}
void FileTimes::GetTime(stat_time_type type, struct timespec& time) const
{
if(stat_time_type::CTIME == type){
time = ft_ctime;
}else if(stat_time_type::ATIME == type){
time = ft_atime;
}else{ // stat_time_type::MTIME
time = ft_mtime;
}
}
void FileTimes::ReflectFileTimes(struct stat& st) const
{
if(!IsOmitCTime()){
set_timespec_to_stat(st, stat_time_type::CTIME, ft_ctime);
}
if(!IsOmitATime()){
set_timespec_to_stat(st, stat_time_type::ATIME, ft_atime);
}
if(!IsOmitMTime()){
set_timespec_to_stat(st, stat_time_type::MTIME, ft_mtime);
}
}
void FileTimes::SetTime(stat_time_type type, struct timespec time)
{
if(UTIME_NOW == time.tv_nsec){
s3fs_realtime(time);
}
if(stat_time_type::CTIME == type){
ft_ctime = time;
}else if(stat_time_type::ATIME == type){
ft_atime = time;
}else{ // stat_time_type::MTIME
ft_mtime = time;
}
}
void FileTimes::SetAllNow()
{
struct timespec time;
s3fs_realtime(time);
SetAll(time, time, time);
}
void FileTimes::SetAll(const struct stat& stbuf, bool no_omit)
{
struct timespec ts_ctime;
struct timespec ts_atime;
struct timespec ts_mtime;
set_stat_to_timespec(stbuf, stat_time_type::CTIME, ts_ctime);
set_stat_to_timespec(stbuf, stat_time_type::ATIME, ts_atime);
set_stat_to_timespec(stbuf, stat_time_type::MTIME, ts_mtime);
SetAll(ts_ctime, ts_atime, ts_mtime, no_omit);
}
void FileTimes::SetAll(struct timespec ts_ctime, struct timespec ts_atime, struct timespec ts_mtime, bool no_omit)
{
struct timespec ts_now_time;
s3fs_realtime(ts_now_time);
if(UTIME_NOW == ts_ctime.tv_nsec){
SetCTime(ts_now_time);
}else if(!no_omit || UTIME_OMIT != ts_ctime.tv_nsec){
SetCTime(ts_ctime);
}
if(UTIME_NOW == ts_atime.tv_nsec){
SetATime(ts_now_time);
}else if(!no_omit || UTIME_OMIT != ts_atime.tv_nsec){
SetATime(ts_atime);
}
if(UTIME_NOW == ts_mtime.tv_nsec){
SetMTime(ts_now_time);
}else if(!no_omit || UTIME_OMIT != ts_mtime.tv_nsec){
SetMTime(ts_mtime);
}
}
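// (editor's note) With the default no_omit=true, entries whose tv_nsec is
// UTIME_OMIT are skipped, so the previously stored time is kept; with
// no_omit=false they are copied as-is, resetting that time back to "omit".
// UTIME_NOW entries always become the current real time in both cases.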
void FileTimes::SetAll(const FileTimes& other, bool no_omit)
{
if(!no_omit || !other.IsOmitCTime()){
SetCTime(other.ctime());
}
if(!no_omit || !other.IsOmitATime()){
SetATime(other.atime());
}
if(!no_omit || !other.IsOmitMTime()){
SetMTime(other.mtime());
}
}
bool FileTimes::IsOmit(stat_time_type type) const
{
if(stat_time_type::CTIME == type){
return (UTIME_OMIT == ft_ctime.tv_nsec);
}else if(stat_time_type::ATIME == type){
return (UTIME_OMIT == ft_atime.tv_nsec);
}else{ // stat_time_type::MTIME
return (UTIME_OMIT == ft_mtime.tv_nsec);
}
}
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/

src/filetimes.h (new file, 120 lines)
View File

@ -0,0 +1,120 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_FILETIMES_H_
#define S3FS_FILETIMES_H_
#include <cstdint>
#include <string>
#include <sys/stat.h>
//-------------------------------------------------------------------
// Utility for stat time type
//-------------------------------------------------------------------
enum class stat_time_type : uint8_t {
ATIME,
MTIME,
CTIME
};
//-------------------------------------------------------------------
// Utility Functions for timespecs
//-------------------------------------------------------------------
bool valid_timespec(const struct timespec& ts);
constexpr int compare_timespec(const struct timespec& ts1, const struct timespec& ts2);
int compare_timespec(const struct stat& st, stat_time_type type, const struct timespec& ts);
void set_timespec_to_stat(struct stat& st, stat_time_type type, const struct timespec& ts);
struct timespec* set_stat_to_timespec(const struct stat& st, stat_time_type type, struct timespec& ts);
std::string str_stat_time(const struct stat& st, stat_time_type type);
struct timespec* s3fs_realtime(struct timespec& ts);
std::string s3fs_str_realtime();
//-------------------------------------------------------------------
// FileTimes Class
//-------------------------------------------------------------------
// [NOTE]
// In this class, the internal times are set to UTIME_OMIT when they are
// initialized or cleared.
// If UTIME_NOW is specified, the value is converted to the current time
// before it is stored.
//
class FileTimes
{
private:
struct timespec ft_ctime; // Change time
struct timespec ft_atime; // Access time
struct timespec ft_mtime; // Modification time
private:
void Clear(stat_time_type type);
const struct timespec& GetTime(stat_time_type type) const;
void GetTime(stat_time_type type, struct timespec& time) const;
void SetTime(stat_time_type type, struct timespec time);
bool IsOmit(stat_time_type type) const;
public:
explicit FileTimes() : ft_ctime{0, UTIME_OMIT}, ft_atime{0, UTIME_OMIT}, ft_mtime{0, UTIME_OMIT} {}
// Clear
void Clear();
void ClearCTime() { Clear(stat_time_type::CTIME); }
void ClearATime() { Clear(stat_time_type::ATIME); }
void ClearMTime() { Clear(stat_time_type::MTIME); }
// Get value
const struct timespec& ctime() const { return GetTime(stat_time_type::CTIME); }
const struct timespec& atime() const { return GetTime(stat_time_type::ATIME); }
const struct timespec& mtime() const { return GetTime(stat_time_type::MTIME); }
void GetCTime(struct timespec& time) const { GetTime(stat_time_type::CTIME, time); }
void GetATime(struct timespec& time) const { GetTime(stat_time_type::ATIME, time); }
void GetMTime(struct timespec& time) const { GetTime(stat_time_type::MTIME, time); }
void ReflectFileTimes(struct stat& st) const;
// Set value
void SetCTime(struct timespec time) { SetTime(stat_time_type::CTIME, time); }
void SetATime(struct timespec time) { SetTime(stat_time_type::ATIME, time); }
void SetMTime(struct timespec time) { SetTime(stat_time_type::MTIME, time); }
void SetAllNow();
void SetAll(const struct stat& stbuf, bool no_omit = true);
void SetAll(struct timespec ts_ctime, struct timespec ts_atime, struct timespec ts_mtime, bool no_omit = true);
void SetAll(const FileTimes& other, bool no_omit = true);
// Check
bool IsOmitCTime() const { return IsOmit(stat_time_type::CTIME); }
bool IsOmitATime() const { return IsOmit(stat_time_type::ATIME); }
bool IsOmitMTime() const { return IsOmit(stat_time_type::MTIME); }
};
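// [EXAMPLE] (editor's sketch, not part of the original header)
//
//     FileTimes ft;                        // all three times start as UTIME_OMIT
//     ft.SetMTime(timespec{0, UTIME_NOW}); // stored as the current real time
//     struct stat st{};
//     ft.ReflectFileTimes(st);             // only non-omitted times are written to st
//     bool untouched = ft.IsOmitATime();   // true: atime was never set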
#endif // S3FS_FILETIMES_H_
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/

View File

@ -22,7 +22,6 @@
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <pthread.h>
#include <unistd.h>
#include <syslog.h>
#include <sys/types.h>
@ -112,7 +111,7 @@ std::unique_ptr<unsigned char[]> s3fs_HMAC(const void* key, size_t keylen, const
return nullptr;
}
std::unique_ptr<unsigned char[]> digest(new unsigned char[SHA1_DIGEST_SIZE]);
auto digest = std::make_unique<unsigned char[]>(SHA1_DIGEST_SIZE);
struct hmac_sha1_ctx ctx_hmac;
hmac_sha1_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
@ -129,7 +128,7 @@ std::unique_ptr<unsigned char[]> s3fs_HMAC256(const void* key, size_t keylen, co
return nullptr;
}
std::unique_ptr<unsigned char[]> digest(new unsigned char[SHA256_DIGEST_SIZE]);
auto digest = std::make_unique<unsigned char[]>(SHA256_DIGEST_SIZE);
struct hmac_sha256_ctx ctx_hmac;
hmac_sha256_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
@ -151,7 +150,7 @@ std::unique_ptr<unsigned char[]> s3fs_HMAC(const void* key, size_t keylen, const
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){
return nullptr;
}
std::unique_ptr<unsigned char[]> digest(new unsigned char[*digestlen + 1]);
auto digest = std::make_unique<unsigned char[]>(*digestlen + 1);
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA1, key, keylen, data, datalen, digest.get())){
return nullptr;
}
@ -167,7 +166,7 @@ std::unique_ptr<unsigned char[]> s3fs_HMAC256(const void* key, size_t keylen, co
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA256))){
return nullptr;
}
std::unique_ptr<unsigned char[]> digest(new unsigned char[*digestlen + 1]);
auto digest = std::make_unique<unsigned char[]>(*digestlen + 1);
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA256, key, keylen, data, datalen, digest.get())){
return nullptr;
}
@ -206,10 +205,9 @@ bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
md5_init(&ctx_md5);
for(off_t total = 0; total < size; total += bytes){
off_t len = 512;
unsigned char buf[len];
bytes = len < (size - total) ? len : (size - total);
bytes = pread(fd, buf, bytes, start + total);
std::array<char, 512> buf;
bytes = std::min(static_cast<off_t>(buf.size()), (size - total));
bytes = pread(fd, buf.data(), bytes, start + total);
if(0 == bytes){
// end of file
break;
@ -218,7 +216,7 @@ bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
S3FS_PRN_ERR("file read error(%d)", errno);
return false;
}
md5_update(&ctx_md5, bytes, buf);
md5_update(&ctx_md5, bytes, reinterpret_cast<const uint8_t*>(buf.data()));
}
md5_digest(&ctx_md5, result->size(), result->data());
@ -261,10 +259,9 @@ bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
}
for(off_t total = 0; total < size; total += bytes){
off_t len = 512;
char buf[len];
bytes = len < (size - total) ? len : (size - total);
bytes = pread(fd, buf, bytes, start + total);
std::array<char, 512> buf;
bytes = std::min(static_cast<off_t>(buf.size()), (size - total));
bytes = pread(fd, buf.data(), bytes, start + total);
if(0 == bytes){
// end of file
break;
@ -274,7 +271,7 @@ bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
gcry_md_close(ctx_md5);
return false;
}
gcry_md_write(ctx_md5, buf, bytes);
gcry_md_write(ctx_md5, buf.data(), bytes);
}
memcpy(result->data(), gcry_md_read(ctx_md5, 0), result->size());
gcry_md_close(ctx_md5);
@ -306,10 +303,9 @@ bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
sha256_init(&ctx_sha256);
for(off_t total = 0; total < size; total += bytes){
off_t len = 512;
unsigned char buf[len];
bytes = len < (size - total) ? len : (size - total);
bytes = pread(fd, buf, bytes, start + total);
std::array<char, 512> buf;
bytes = std::min(static_cast<off_t>(buf.size()), (size - total));
bytes = pread(fd, buf.data(), bytes, start + total);
if(0 == bytes){
// end of file
break;
@ -318,7 +314,7 @@ bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
S3FS_PRN_ERR("file read error(%d)", errno);
return false;
}
sha256_update(&ctx_sha256, bytes, buf);
sha256_update(&ctx_sha256, bytes, reinterpret_cast<const uint8_t*>(buf.data()));
}
sha256_digest(&ctx_sha256, result->size(), result->data());
@ -362,10 +358,9 @@ bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
}
for(off_t total = 0; total < size; total += bytes){
off_t len = 512;
char buf[len];
bytes = len < (size - total) ? len : (size - total);
bytes = pread(fd, buf, bytes, start + total);
std::array<char, 512> buf;
bytes = std::min(static_cast<off_t>(buf.size()), (size - total));
bytes = pread(fd, buf.data(), bytes, start + total);
if(0 == bytes){
// end of file
break;
@ -375,7 +370,7 @@ bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
gcry_md_close(ctx_sha256);
return false;
}
gcry_md_write(ctx_sha256, buf, bytes);
gcry_md_write(ctx_sha256, buf.data(), bytes);
}
memcpy(result->data(), gcry_md_read(ctx_sha256, 0), result->size());
gcry_md_close(ctx_sha256);

View File

@ -19,15 +19,18 @@
*/
#include <ctime>
#include <unistd.h>
#include <string>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include "common.h"
#include "metaheader.h"
#include "string_util.h"
#include "s3fs_util.h"
#include "filetimes.h"
static constexpr struct timespec DEFAULT_TIMESPEC = {-1, 0};
static constexpr struct timespec ERROR_TIMESPEC = {-1, 0};
static constexpr struct timespec OMIT_TIMESPEC = {0, UTIME_OMIT};
//-------------------------------------------------------------------
// Utility functions for convert
@ -57,53 +60,54 @@ static struct timespec cvt_string_to_time(const char *str)
static struct timespec get_time(const headers_t& meta, const char *header)
{
headers_t::const_iterator iter;
if(meta.end() == (iter = meta.find(header))){
return DEFAULT_TIMESPEC;
if(meta.cend() == (iter = meta.find(header))){
return ERROR_TIMESPEC;
}
return cvt_string_to_time((*iter).second.c_str());
}
struct timespec get_mtime(const headers_t& meta, bool overcheck)
{
struct timespec t = get_time(meta, "x-amz-meta-mtime");
if(0 < t.tv_sec){
return t;
struct timespec mtime = get_time(meta, "x-amz-meta-mtime");
if(0 <= mtime.tv_sec && UTIME_OMIT != mtime.tv_nsec){
return mtime;
}
t = get_time(meta, "x-amz-meta-goog-reserved-file-mtime");
if(0 < t.tv_sec){
return t;
mtime = get_time(meta, "x-amz-meta-goog-reserved-file-mtime");
if(0 <= mtime.tv_sec && UTIME_OMIT != mtime.tv_nsec){
return mtime;
}
if(overcheck){
struct timespec ts = {get_lastmodified(meta), 0};
return ts;
mtime = {get_lastmodified(meta), 0};
return mtime;
}
return DEFAULT_TIMESPEC;
return OMIT_TIMESPEC;
}
struct timespec get_ctime(const headers_t& meta, bool overcheck)
{
struct timespec t = get_time(meta, "x-amz-meta-ctime");
if(0 < t.tv_sec){
return t;
struct timespec ctime = get_time(meta, "x-amz-meta-ctime");
if(0 <= ctime.tv_sec && UTIME_OMIT != ctime.tv_nsec){
return ctime;
}
if(overcheck){
struct timespec ts = {get_lastmodified(meta), 0};
return ts;
ctime = {get_lastmodified(meta), 0};
return ctime;
}
return DEFAULT_TIMESPEC;
return OMIT_TIMESPEC;
}
struct timespec get_atime(const headers_t& meta, bool overcheck)
{
struct timespec t = get_time(meta, "x-amz-meta-atime");
if(0 < t.tv_sec){
return t;
struct timespec atime = get_time(meta, "x-amz-meta-atime");
if(0 <= atime.tv_sec && UTIME_OMIT != atime.tv_nsec){
return atime;
}
if(overcheck){
struct timespec ts = {get_lastmodified(meta), 0};
return ts;
atime = {get_lastmodified(meta), 0};
return atime;
}
return DEFAULT_TIMESPEC;
return OMIT_TIMESPEC;
}
off_t get_size(const char *s)
@ -113,8 +117,8 @@ off_t get_size(const char *s)
off_t get_size(const headers_t& meta)
{
headers_t::const_iterator iter = meta.find("Content-Length");
if(meta.end() == iter){
auto iter = meta.find("Content-Length");
if(meta.cend() == iter){
return 0;
}
return get_size((*iter).second.c_str());
@ -131,12 +135,12 @@ mode_t get_mode(const headers_t& meta, const std::string& strpath, bool checkdir
bool isS3sync = false;
headers_t::const_iterator iter;
if(meta.end() != (iter = meta.find("x-amz-meta-mode"))){
if(meta.cend() != (iter = meta.find("x-amz-meta-mode"))){
mode = get_mode((*iter).second.c_str());
}else if(meta.end() != (iter = meta.find("x-amz-meta-permissions"))){ // for s3sync
}else if(meta.cend() != (iter = meta.find("x-amz-meta-permissions"))){ // for s3sync
mode = get_mode((*iter).second.c_str());
isS3sync = true;
}else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-mode"))){ // for GCS
}else if(meta.cend() != (iter = meta.find("x-amz-meta-goog-reserved-posix-mode"))){ // for GCS
mode = get_mode((*iter).second.c_str(), 8);
}else{
// If another tool creates an object without permissions, default to owner
@ -153,7 +157,7 @@ mode_t get_mode(const headers_t& meta, const std::string& strpath, bool checkdir
if(forcedir){
mode |= S_IFDIR;
}else{
if(meta.end() != (iter = meta.find("Content-Type"))){
if(meta.cend() != (iter = meta.find("Content-Type"))){
std::string strConType = (*iter).second;
// Leave just the mime type, remove any optional parameters (eg charset)
std::string::size_type pos = strConType.find(';');
@ -205,6 +209,105 @@ mode_t get_mode(const headers_t& meta, const std::string& strpath, bool checkdir
return mode;
}
// [NOTE]
// Gets only the file type (S_IFMT) bits of the mode from the meta headers.
// The processing is almost the same as get_mode().
// This function is intended to be used from get_object_attribute().
//
static mode_t convert_meta_to_mode_fmt(const headers_t& meta)
{
mode_t mode = 0;
bool isS3sync = false;
headers_t::const_iterator iter;
if(meta.cend() != (iter = meta.find("x-amz-meta-mode"))){
mode = get_mode((*iter).second.c_str());
}else if(meta.cend() != (iter = meta.find("x-amz-meta-permissions"))){ // for s3sync
mode = get_mode((*iter).second.c_str());
isS3sync = true;
}else if(meta.cend() != (iter = meta.find("x-amz-meta-goog-reserved-posix-mode"))){ // for GCS
mode = get_mode((*iter).second.c_str(), 8);
}
if(!(mode & S_IFMT)){
if(!isS3sync){
if(meta.cend() != (iter = meta.find("Content-Type"))){
std::string strConType = (*iter).second;
// Leave just the mime type, remove any optional parameters (eg charset)
std::string::size_type pos = strConType.find(';');
if(std::string::npos != pos){
strConType.erase(pos);
}
if(strConType == "application/x-directory" || strConType == "httpd/unix-directory"){
// Nextcloud uses this MIME type for directory objects when mounting bucket as external Storage
mode |= S_IFDIR;
}
}
}
}
return (mode & S_IFMT);
}
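// (editor's illustration) For example, an object whose metadata carries
// "x-amz-meta-mode: 33188" (decimal for octal 0100644) yields S_IFREG here,
// so is_reg_fmt() below returns true for it.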
bool is_reg_fmt(const headers_t& meta)
{
return S_ISREG(convert_meta_to_mode_fmt(meta));
}
bool is_symlink_fmt(const headers_t& meta)
{
return S_ISLNK(convert_meta_to_mode_fmt(meta));
}
bool is_dir_fmt(const headers_t& meta)
{
return S_ISDIR(convert_meta_to_mode_fmt(meta));
}
// [NOTE]
// For directory objects, a detailed distinction is not possible here,
// so the plain case is always reported as DIR_NORMAL (see the note below).
//
// Objects uploaded by clients other than s3fs may have a Content-Type of
// application/unknown and no x-amz-meta-mode header. For that case the
// caller can specify the objtype_t to assume via default_type.
//
objtype_t derive_object_type(const std::string& strpath, const headers_t& meta, objtype_t default_type)
{
mode_t mode = convert_meta_to_mode_fmt(meta);
if(S_ISDIR(mode)){
if('/' != *strpath.rbegin()){
return objtype_t::DIR_NOT_TERMINATE_SLASH;
}else if(std::string::npos != strpath.find("_$folder$", 0)){
return objtype_t::DIR_FOLDER_SUFFIX;
}else{
// [NOTE]
// It returns DIR_NORMAL, although it could be DIR_NOT_EXIST_OBJECT.
//
return objtype_t::DIR_NORMAL;
}
}else if(S_ISLNK(mode)){
return objtype_t::SYMLINK;
}else if(S_ISREG(mode)){
return objtype_t::FILE;
}else if(0 == mode){
// If the x-amz-meta-mode header is not present, mode is 0.
headers_t::const_iterator iter;
if(meta.cend() != (iter = meta.find("Content-Type"))){
std::string strConType = iter->second;
// Leave just the mime type, remove any optional parameters (eg charset)
std::string::size_type pos = strConType.find(';');
if(std::string::npos != pos){
strConType.erase(pos);
}
if(strConType == "application/unknown"){
return default_type;
}
}
}
return objtype_t::UNKNOWN;
}
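// (editor's illustration) For a directory-mode object, "logs" (no trailing
// slash) maps to objtype_t::DIR_NOT_TERMINATE_SLASH, "logs/" to DIR_NORMAL,
// and a key containing "_$folder$" that ends with a slash to
// DIR_FOLDER_SUFFIX; an object with no mode header and Content-Type
// "application/unknown" falls back to the supplied default_type.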
uid_t get_uid(const char *s)
{
return static_cast<uid_t>(cvt_strtoofft(s, /*base=*/ 0));
@ -213,11 +316,11 @@ uid_t get_uid(const char *s)
uid_t get_uid(const headers_t& meta)
{
headers_t::const_iterator iter;
if(meta.end() != (iter = meta.find("x-amz-meta-uid"))){
if(meta.cend() != (iter = meta.find("x-amz-meta-uid"))){
return get_uid((*iter).second.c_str());
}else if(meta.end() != (iter = meta.find("x-amz-meta-owner"))){ // for s3sync
}else if(meta.cend() != (iter = meta.find("x-amz-meta-owner"))){ // for s3sync
return get_uid((*iter).second.c_str());
}else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-uid"))){ // for GCS
}else if(meta.cend() != (iter = meta.find("x-amz-meta-goog-reserved-posix-uid"))){ // for GCS
return get_uid((*iter).second.c_str());
}else{
return geteuid();
@ -232,11 +335,11 @@ gid_t get_gid(const char *s)
gid_t get_gid(const headers_t& meta)
{
headers_t::const_iterator iter;
if(meta.end() != (iter = meta.find("x-amz-meta-gid"))){
if(meta.cend() != (iter = meta.find("x-amz-meta-gid"))){
return get_gid((*iter).second.c_str());
}else if(meta.end() != (iter = meta.find("x-amz-meta-group"))){ // for s3sync
}else if(meta.cend() != (iter = meta.find("x-amz-meta-group"))){ // for s3sync
return get_gid((*iter).second.c_str());
}else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-gid"))){ // for GCS
}else if(meta.cend() != (iter = meta.find("x-amz-meta-goog-reserved-posix-gid"))){ // for GCS
return get_gid((*iter).second.c_str());
}else{
return getegid();
@ -250,30 +353,28 @@ blkcnt_t get_blocks(off_t size)
time_t cvtIAMExpireStringToTime(const char* s)
{
struct tm tm;
struct tm tm{};
if(!s){
return 0L;
}
memset(&tm, 0, sizeof(struct tm));
strptime(s, "%Y-%m-%dT%H:%M:%S", &tm);
s3fs_strptime(s, "%Y-%m-%dT%H:%M:%S", &tm);
return timegm(&tm); // GMT
}
time_t get_lastmodified(const char* s)
{
struct tm tm;
struct tm tm{};
if(!s){
return -1;
}
memset(&tm, 0, sizeof(struct tm));
strptime(s, "%a, %d %b %Y %H:%M:%S %Z", &tm);
s3fs_strptime(s, "%a, %d %b %Y %H:%M:%S %Z", &tm);
return timegm(&tm); // GMT
}
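get_lastmodified() converts the HTTP-date form of the Last-Modified header into UTC epoch seconds via timegm(). A standalone sketch of the same conversion, using plain strptime() instead of the project's s3fs_strptime wrapper (timegm() is a common glibc/BSD extension, as used above):

#include <time.h>

// Parse e.g. "Fri, 03 Oct 2025 13:59:41 GMT" into UTC epoch seconds, -1 on failure.
static time_t parse_http_date(const char* s)
{
    struct tm tm{};
    if(!s || !strptime(s, "%a, %d %b %Y %H:%M:%S %Z", &tm)){
        return -1;
    }
    return timegm(&tm);   // interpret the broken-down time as GMT
}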
time_t get_lastmodified(const headers_t& meta)
{
headers_t::const_iterator iter = meta.find("Last-Modified");
if(meta.end() == iter){
auto iter = meta.find("Last-Modified");
if(meta.cend() == iter){
return -1;
}
return get_lastmodified((*iter).second.c_str());
@ -293,21 +394,21 @@ bool is_need_check_obj_detail(const headers_t& meta)
return false;
}
// if the object has x-amz-meta information, checking is no more.
if(meta.end() != meta.find("x-amz-meta-mode") ||
meta.end() != meta.find("x-amz-meta-mtime") ||
meta.end() != meta.find("x-amz-meta-ctime") ||
meta.end() != meta.find("x-amz-meta-atime") ||
meta.end() != meta.find("x-amz-meta-uid") ||
meta.end() != meta.find("x-amz-meta-gid") ||
meta.end() != meta.find("x-amz-meta-owner") ||
meta.end() != meta.find("x-amz-meta-group") ||
meta.end() != meta.find("x-amz-meta-permissions") )
if(meta.cend() != meta.find("x-amz-meta-mode") ||
meta.cend() != meta.find("x-amz-meta-mtime") ||
meta.cend() != meta.find("x-amz-meta-ctime") ||
meta.cend() != meta.find("x-amz-meta-atime") ||
meta.cend() != meta.find("x-amz-meta-uid") ||
meta.cend() != meta.find("x-amz-meta-gid") ||
meta.cend() != meta.find("x-amz-meta-owner") ||
meta.cend() != meta.find("x-amz-meta-group") ||
meta.cend() != meta.find("x-amz-meta-permissions") )
{
return false;
}
// if there is not Content-Type, or Content-Type is "x-directory",
// checking is no more.
if(meta.end() == (iter = meta.find("Content-Type"))){
if(meta.cend() == (iter = meta.find("Content-Type"))){
return false;
}
if("application/x-directory" == (*iter).second){
@ -322,8 +423,8 @@ bool is_need_check_obj_detail(const headers_t& meta)
bool merge_headers(headers_t& base, const headers_t& additional, bool add_noexist)
{
bool added = false;
for(headers_t::const_iterator iter = additional.begin(); iter != additional.end(); ++iter){
if(add_noexist || base.find(iter->first) != base.end()){
for(auto iter = additional.cbegin(); iter != additional.cend(); ++iter){
if(add_noexist || base.find(iter->first) != base.cend()){
base[iter->first] = iter->second;
added = true;
}
@ -331,6 +432,57 @@ bool merge_headers(headers_t& base, const headers_t& additional, bool add_noexis
return added;
}
bool convert_header_to_stat(const std::string& strpath, const headers_t& meta, struct stat& stbuf, bool forcedir)
{
stbuf = {};
// set hard link count always 1
stbuf.st_nlink = 1; // see fuse FAQ
// mode
stbuf.st_mode = get_mode(meta, strpath, true, forcedir);
// blocks
if(S_ISREG(stbuf.st_mode)){
stbuf.st_blocks = get_blocks(stbuf.st_size);
}
stbuf.st_blksize = 4096;
// mtime
struct timespec mtime = get_mtime(meta);
if(mtime.tv_sec < 0){
mtime = {0, 0};
}
set_timespec_to_stat(stbuf, stat_time_type::MTIME, mtime);
// ctime
struct timespec ctime = get_ctime(meta);
if(ctime.tv_sec < 0){
ctime = {0, 0};
}
set_timespec_to_stat(stbuf, stat_time_type::CTIME, ctime);
// atime
struct timespec atime = get_atime(meta);
if(atime.tv_sec < 0){
atime = {0, 0};
}
set_timespec_to_stat(stbuf, stat_time_type::ATIME, atime);
// size
if(S_ISDIR(stbuf.st_mode)){
stbuf.st_size = 4096;
}else{
stbuf.st_size = get_size(meta);
}
// uid/gid
stbuf.st_uid = get_uid(meta);
stbuf.st_gid = get_gid(meta);
return true;
}
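convert_header_to_stat() assembles a struct stat purely from response headers. A hypothetical call, assuming the usual s3fs conventions (Content-Length feeds get_size(), the x-amz-meta-* values carry the POSIX metadata); the literal values are illustrative only:

headers_t meta;
meta["Content-Type"]     = "application/octet-stream";
meta["Content-Length"]   = "1024";         // assumed input to get_size()
meta["x-amz-meta-mode"]  = "33188";        // 0100644: regular file, rw-r--r--
meta["x-amz-meta-mtime"] = "1700000000";   // epoch seconds

struct stat stbuf{};
if(convert_header_to_stat("/mybucket/file.txt", meta, stbuf)){
    // st_mode, st_size, st_blocks and the three timestamps are now filled
    // from the headers; uid/gid fall back to geteuid()/getegid().
}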
/*
* Local variables:
* tab-width: 4

View File

@ -21,21 +21,16 @@
#ifndef S3FS_METAHEADER_H_
#define S3FS_METAHEADER_H_
#include <string>
#include <strings.h>
#include <map>
#include <string>
#include <sys/stat.h>
#include "types.h"
//-------------------------------------------------------------------
// headers_t
//-------------------------------------------------------------------
struct header_nocase_cmp
{
bool operator()(const std::string &strleft, const std::string &strright) const
{
return (strcasecmp(strleft.c_str(), strright.c_str()) < 0);
}
};
typedef std::map<std::string, std::string, header_nocase_cmp> headers_t;
typedef std::map<std::string, std::string, case_insensitive_compare_func> headers_t;
//-------------------------------------------------------------------
// Functions
@ -47,6 +42,10 @@ off_t get_size(const char *s);
off_t get_size(const headers_t& meta);
mode_t get_mode(const char *s, int base = 0);
mode_t get_mode(const headers_t& meta, const std::string& strpath, bool checkdir = false, bool forcedir = false);
bool is_reg_fmt(const headers_t& meta);
bool is_symlink_fmt(const headers_t& meta);
bool is_dir_fmt(const headers_t& meta);
objtype_t derive_object_type(const std::string& strpath, const headers_t& meta, objtype_t default_type = objtype_t::UNKNOWN);
uid_t get_uid(const char *s);
uid_t get_uid(const headers_t& meta);
gid_t get_gid(const char *s);
@ -57,7 +56,7 @@ time_t get_lastmodified(const char* s);
time_t get_lastmodified(const headers_t& meta);
bool is_need_check_obj_detail(const headers_t& meta);
bool merge_headers(headers_t& base, const headers_t& additional, bool add_noexist);
bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value);
bool convert_header_to_stat(const std::string& strpath, const headers_t& meta, struct stat& stbuf, bool forcedir = false);
#endif // S3FS_METAHEADER_H_
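headers_t is now keyed with case_insensitive_compare_func from types.h, so meta.find("Content-Type") matches a response header sent as "content-type". A minimal sketch of such a comparator, assuming it behaves like the strcasecmp-based functor it replaces:

#include <strings.h>
#include <map>
#include <string>

// Assumed shape: a strict weak ordering that ignores ASCII case,
// mirroring the removed header_nocase_cmp.
struct ci_less
{
    bool operator()(const std::string& l, const std::string& r) const
    {
        return strcasecmp(l.c_str(), r.c_str()) < 0;
    }
};
typedef std::map<std::string, std::string, ci_less> ci_headers;

// ci_headers h; h["content-type"] = "text/plain";
// h.find("Content-Type") != h.cend();   // true: both spellings hit the same entry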

View File

@ -20,6 +20,7 @@
#include <cstdio>
#include <cstdlib>
#include <string>
#include "s3fs.h"
#include "s3fs_logger.h"
@ -28,6 +29,7 @@
#include "s3fs_xml.h"
#include "s3fs_auth.h"
#include "string_util.h"
#include "s3fs_threadreqs.h"
//-------------------------------------------------------------------
// Global variables
@ -47,7 +49,7 @@ static void print_incomp_mpu_list(const incomp_mpu_list_t& list)
printf("---------------------------------------------------------------\n");
int cnt = 0;
for(incomp_mpu_list_t::const_iterator iter = list.begin(); iter != list.end(); ++iter, ++cnt){
for(auto iter = list.cbegin(); iter != list.cend(); ++iter, ++cnt){
printf(" Path : %s\n", (*iter).key.c_str());
printf(" UploadId : %s\n", (*iter).id.c_str());
printf(" Date : %s\n", (*iter).date.c_str());
@ -68,9 +70,8 @@ static bool abort_incomp_mpu_list(const incomp_mpu_list_t& list, time_t abort_ti
time_t now_time = time(nullptr);
// do removing.
S3fsCurl s3fscurl;
bool result = true;
for(incomp_mpu_list_t::const_iterator iter = list.begin(); iter != list.end(); ++iter){
for(auto iter = list.cbegin(); iter != list.cend(); ++iter){
const char* tpath = (*iter).key.c_str();
std::string upload_id = (*iter).id;
@ -85,15 +86,12 @@ static bool abort_incomp_mpu_list(const incomp_mpu_list_t& list, time_t abort_ti
}
}
if(0 != s3fscurl.AbortMultipartUpload(tpath, upload_id)){
if(0 != abort_multipart_upload_request(tpath, upload_id)){
S3FS_PRN_EXIT("Failed to remove %s multipart uploading object.", tpath);
result = false;
}else{
printf("Succeed to remove %s multipart uploading object.\n", tpath);
}
// reset(initialize) curl object
s3fscurl.DestroyCurlHandle();
}
return result;
}
@ -115,15 +113,15 @@ int s3fs_utility_processing(time_t abort_time)
// parse result(incomplete multipart upload information)
S3FS_PRN_DBG("response body = {\n%s\n}", body.c_str());
xmlDocPtr doc;
if(nullptr == (doc = xmlReadMemory(body.c_str(), static_cast<int>(body.size()), "", nullptr, 0))){
std::unique_ptr<xmlDoc, decltype(&xmlFreeDoc)> doc(xmlReadMemory(body.c_str(), static_cast<int>(body.size()), "", nullptr, 0), xmlFreeDoc);
if(nullptr == doc){
S3FS_PRN_DBG("xmlReadMemory exited with error.");
result = EXIT_FAILURE;
}else{
// make incomplete uploads list
incomp_mpu_list_t list;
if(!get_incomp_mpu_list(doc, list)){
if(!get_incomp_mpu_list(doc.get(), list)){
S3FS_PRN_DBG("get_incomp_mpu_list exited with error.");
result = EXIT_FAILURE;
@ -139,7 +137,6 @@ int s3fs_utility_processing(time_t abort_time)
}
}
}
S3FS_XMLFREEDOC(doc);
}
}

View File

@ -21,6 +21,8 @@
#ifndef S3FS_MPU_UTIL_H_
#define S3FS_MPU_UTIL_H_
#include <cstdint>
#include <ctime>
#include <string>
#include <vector>
@ -39,7 +41,7 @@ typedef std::vector<INCOMP_MPU_INFO> incomp_mpu_list_t;
//-------------------------------------------------------------------
// enum for utility process mode
//-------------------------------------------------------------------
enum class utility_incomp_type{
enum class utility_incomp_type : uint8_t {
NO_UTILITY_MODE = 0, // not utility mode
INCOMP_TYPE_LIST, // list of incomplete mpu
INCOMP_TYPE_ABORT // delete incomplete mpu

View File

@ -22,7 +22,6 @@
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <pthread.h>
#include <unistd.h>
#include <syslog.h>
#include <sys/types.h>
@ -127,7 +126,7 @@ static std::unique_ptr<unsigned char[]> s3fs_HMAC_RAW(const void* key, size_t ke
PK11_FreeSymKey(pKey);
PK11_FreeSlot(Slot);
std::unique_ptr<unsigned char[]> digest(new unsigned char[*digestlen]);
auto digest = std::make_unique<unsigned char[]>(*digestlen);
memcpy(digest.get(), tmpdigest, *digestlen);
return digest;
@ -176,10 +175,9 @@ bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
md5ctx = PK11_CreateDigestContext(SEC_OID_MD5);
for(off_t total = 0; total < size; total += bytes){
off_t len = 512;
unsigned char buf[len];
bytes = len < (size - total) ? len : (size - total);
bytes = pread(fd, buf, bytes, start + total);
std::array<unsigned char, 512> buf;
bytes = std::min(static_cast<off_t>(buf.size()), (size - total));
bytes = pread(fd, buf.data(), bytes, start + total);
if(0 == bytes){
// end of file
break;
@ -189,12 +187,12 @@ bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
PK11_DestroyContext(md5ctx, PR_TRUE);
return false;
}
PK11_DigestOp(md5ctx, buf, bytes);
PK11_DigestOp(md5ctx, buf.data(), bytes);
}
PK11_DigestFinal(md5ctx, result->data(), &md5outlen, result->size());
PK11_DestroyContext(md5ctx, PR_TRUE);
return false;
return true;
}
//-------------------------------------------------------------------
@ -230,10 +228,9 @@ bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);
for(off_t total = 0; total < size; total += bytes){
off_t len = 512;
unsigned char buf[len];
bytes = len < (size - total) ? len : (size - total);
bytes = pread(fd, buf, bytes, start + total);
std::array<unsigned char, 512> buf;
bytes = std::min(static_cast<off_t>(buf.size()), (size - total));
bytes = pread(fd, buf.data(), bytes, start + total);
if(0 == bytes){
// end of file
break;
@ -243,7 +240,7 @@ bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
PK11_DestroyContext(sha256ctx, PR_TRUE);
return false;
}
PK11_DigestOp(sha256ctx, buf, bytes);
PK11_DigestOp(sha256ctx, buf.data(), bytes);
}
PK11_DigestFinal(sha256ctx, result->data(), &sha256outlen, result->size());
PK11_DestroyContext(sha256ctx, PR_TRUE);

View File

@ -25,16 +25,16 @@
#include <cstdio>
#include <cstdlib>
#include <cerrno>
#include <pthread.h>
#include <memory>
#include <mutex>
#include <unistd.h>
#include <sys/stat.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/md5.h>
#include <openssl/sha.h>
#include <openssl/crypto.h>
#include <openssl/err.h>
#include <string>
#include <thread>
#include "s3fs_auth.h"
#include "s3fs_logger.h"
@ -80,26 +80,19 @@ bool s3fs_destroy_global_ssl()
// internal use struct for openssl
struct CRYPTO_dynlock_value
{
pthread_mutex_t dyn_mutex;
std::mutex dyn_mutex;
};
static pthread_mutex_t* s3fs_crypt_mutex = nullptr;
static std::unique_ptr<std::mutex[]> s3fs_crypt_mutex;
static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line) __attribute__ ((unused));
static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line) __attribute__ ((unused)) NO_THREAD_SAFETY_ANALYSIS;
static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line)
{
if(s3fs_crypt_mutex){
int result;
if(mode & CRYPTO_LOCK){
if(0 != (result = pthread_mutex_lock(&s3fs_crypt_mutex[pos]))){
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", result);
abort();
}
s3fs_crypt_mutex[pos].lock();
}else{
if(0 != (result = pthread_mutex_unlock(&s3fs_crypt_mutex[pos]))){
S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", result);
abort();
}
s3fs_crypt_mutex[pos].unlock();
}
}
}
@ -107,43 +100,23 @@ static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line)
static unsigned long s3fs_crypt_get_threadid() __attribute__ ((unused));
static unsigned long s3fs_crypt_get_threadid()
{
// For FreeBSD etc, some system's pthread_t is structure pointer.
// Then we use cast like C style(not C++) instead of ifdef.
return (unsigned long)(pthread_self());
return static_cast<unsigned long>(std::hash<std::thread::id>()(std::this_thread::get_id()));
}
static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line) __attribute__ ((unused));
static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line)
{
struct CRYPTO_dynlock_value* dyndata = new CRYPTO_dynlock_value();
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
#if S3FS_PTHREAD_ERRORCHECK
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
int result;
if(0 != (result = pthread_mutex_init(&(dyndata->dyn_mutex), &attr))){
S3FS_PRN_CRIT("pthread_mutex_init returned: %d", result);
return nullptr;
}
return dyndata;
return new CRYPTO_dynlock_value();
}
static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line) __attribute__ ((unused));
static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line) __attribute__ ((unused)) NO_THREAD_SAFETY_ANALYSIS;
static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line)
{
if(dyndata){
int result;
if(mode & CRYPTO_LOCK){
if(0 != (result = pthread_mutex_lock(&(dyndata->dyn_mutex)))){
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", result);
abort();
}
dyndata->dyn_mutex.lock();
}else{
if(0 != (result = pthread_mutex_unlock(&(dyndata->dyn_mutex)))){
S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", result);
abort();
}
dyndata->dyn_mutex.unlock();
}
}
}
@ -151,14 +124,7 @@ static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyn
static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, const char* file, int line) __attribute__ ((unused));
static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, const char* file, int line)
{
if(dyndata){
int result = pthread_mutex_destroy(&(dyndata->dyn_mutex));
if(result != 0){
S3FS_PRN_CRIT("failed to destroy dyn_mutex");
abort();
}
delete dyndata;
}
delete dyndata;
}
bool s3fs_init_crypt_mutex()
@ -173,19 +139,7 @@ bool s3fs_init_crypt_mutex()
return false;
}
}
s3fs_crypt_mutex = new pthread_mutex_t[CRYPTO_num_locks()];
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
#if S3FS_PTHREAD_ERRORCHECK
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
int result = pthread_mutex_init(&s3fs_crypt_mutex[cnt], &attr);
if(result != 0){
S3FS_PRN_CRIT("pthread_mutex_init returned: %d", result);
return false;
}
}
s3fs_crypt_mutex = std::make_unique<std::mutex[]>(CRYPTO_num_locks());
// static lock
CRYPTO_set_locking_callback(s3fs_crypt_mutex_lock);
CRYPTO_set_id_callback(s3fs_crypt_get_threadid);
@ -209,16 +163,8 @@ bool s3fs_destroy_crypt_mutex()
CRYPTO_set_id_callback(nullptr);
CRYPTO_set_locking_callback(nullptr);
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
int result = pthread_mutex_destroy(&s3fs_crypt_mutex[cnt]);
if(result != 0){
S3FS_PRN_CRIT("failed to destroy s3fs_crypt_mutex[%d]", cnt);
abort();
}
}
CRYPTO_cleanup_all_ex_data();
delete[] s3fs_crypt_mutex;
s3fs_crypt_mutex = nullptr;
s3fs_crypt_mutex.reset();
return true;
}
@ -232,7 +178,7 @@ static std::unique_ptr<unsigned char[]> s3fs_HMAC_RAW(const void* key, size_t ke
return nullptr;
}
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
std::unique_ptr<unsigned char[]> digest(new unsigned char[*digestlen]);
auto digest = std::make_unique<unsigned char[]>(*digestlen);
if(is_sha256){
HMAC(EVP_sha256(), key, static_cast<int>(keylen), data, datalen, digest.get(), digestlen);
}else{
@ -263,7 +209,7 @@ std::unique_ptr<unsigned char[]> s3fs_HMAC256(const void* key, size_t keylen, co
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* digest)
{
unsigned int digestlen = static_cast<unsigned int>(digest->size());
auto digestlen = static_cast<unsigned int>(digest->size());
const EVP_MD* md = EVP_get_digestbyname("md5");
EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
@ -277,8 +223,7 @@ bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* digest)
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
{
EVP_MD_CTX* mdctx;
unsigned int md5_digest_len = static_cast<unsigned int>(result->size());
auto md5_digest_len = static_cast<unsigned int>(result->size());
off_t bytes;
if(-1 == size){
@ -290,30 +235,27 @@ bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
}
// instead of MD5_Init
mdctx = EVP_MD_CTX_new();
EVP_DigestInit_ex(mdctx, EVP_md5(), nullptr);
std::unique_ptr<EVP_MD_CTX, decltype(&EVP_MD_CTX_free)> mdctx(EVP_MD_CTX_new(), EVP_MD_CTX_free);
EVP_DigestInit_ex(mdctx.get(), EVP_md5(), nullptr);
for(off_t total = 0; total < size; total += bytes){
const off_t len = 512;
char buf[len];
bytes = len < (size - total) ? len : (size - total);
bytes = pread(fd, buf, bytes, start + total);
std::array<char, 512> buf;
bytes = std::min(static_cast<off_t>(buf.size()), (size - total));
bytes = pread(fd, buf.data(), bytes, start + total);
if(0 == bytes){
// end of file
break;
}else if(-1 == bytes){
// error
S3FS_PRN_ERR("file read error(%d)", errno);
EVP_MD_CTX_free(mdctx);
return false;
}
// instead of MD5_Update
EVP_DigestUpdate(mdctx, buf, bytes);
EVP_DigestUpdate(mdctx.get(), buf.data(), bytes);
}
// instead of MD5_Final
EVP_DigestFinal_ex(mdctx, result->data(), &md5_digest_len);
EVP_MD_CTX_free(mdctx);
EVP_DigestFinal_ex(mdctx.get(), result->data(), &md5_digest_len);
return true;
}
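The EVP context above (like the xmlDoc in the utility code earlier) is now owned by a std::unique_ptr with a custom deleter, so every early return releases it automatically. A self-contained sketch of the same pattern for hashing an in-memory buffer with OpenSSL's EVP API:

#include <openssl/evp.h>
#include <array>
#include <cstddef>
#include <memory>

// MD5 of a buffer; the unique_ptr ties EVP_MD_CTX_free to every exit path.
static bool md5_buffer(const unsigned char* data, size_t len, std::array<unsigned char, 16>* out)
{
    std::unique_ptr<EVP_MD_CTX, decltype(&EVP_MD_CTX_free)> ctx(EVP_MD_CTX_new(), EVP_MD_CTX_free);
    if(!ctx){
        return false;                                  // allocation failed
    }
    if(1 != EVP_DigestInit_ex(ctx.get(), EVP_md5(), nullptr) ||
       1 != EVP_DigestUpdate(ctx.get(), data, len)){
        return false;                                  // ctx freed automatically
    }
    unsigned int outlen = 0;
    return 1 == EVP_DigestFinal_ex(ctx.get(), out->data(), &outlen);
}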
@ -354,10 +296,9 @@ bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
MD5_Init(&md5ctx);
for(off_t total = 0; total < size; total += bytes){
const off_t len = 512;
char buf[len];
bytes = len < (size - total) ? len : (size - total);
bytes = pread(fd, buf, bytes, start + total);
std::array<char, 512> buf;
bytes = std::min(static_cast<off_t>(buf.size()), (size - total));
bytes = pread(fd, buf.data(), bytes, start + total);
if(0 == bytes){
// end of file
break;
@ -366,7 +307,7 @@ bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
S3FS_PRN_ERR("file read error(%d)", errno);
return false;
}
MD5_Update(&md5ctx, buf, bytes);
MD5_Update(&md5ctx, buf.data(), bytes);
}
MD5_Final(result->data(), &md5ctx);
@ -384,7 +325,7 @@ bool s3fs_sha256(const unsigned char* data, size_t datalen, sha256_t* digest)
EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
EVP_DigestInit_ex(mdctx, md, nullptr);
EVP_DigestUpdate(mdctx, data, datalen);
unsigned int digestlen = static_cast<unsigned int>(digest->size());
auto digestlen = static_cast<unsigned int>(digest->size());
EVP_DigestFinal_ex(mdctx, digest->data(), &digestlen);
EVP_MD_CTX_destroy(mdctx);
@ -413,10 +354,9 @@ bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
EVP_DigestInit_ex(sha256ctx, md, nullptr);
for(off_t total = 0; total < size; total += bytes){
const off_t len = 512;
char buf[len];
bytes = len < (size - total) ? len : (size - total);
bytes = pread(fd, buf, bytes, start + total);
std::array<char, 512> buf;
bytes = std::min(static_cast<off_t>(buf.size()), (size - total));
bytes = pread(fd, buf.data(), bytes, start + total);
if(0 == bytes){
// end of file
break;
@ -426,7 +366,7 @@ bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
EVP_MD_CTX_destroy(sha256ctx);
return false;
}
EVP_DigestUpdate(sha256ctx, buf, bytes);
EVP_DigestUpdate(sha256ctx, buf.data(), bytes);
}
EVP_DigestFinal_ex(sha256ctx, result->data(), nullptr);
EVP_MD_CTX_destroy(sha256ctx);

View File

@ -24,6 +24,13 @@
//-------------------------------------------------------------------
// Class Semaphore
//-------------------------------------------------------------------
#if __cplusplus >= 202002L
#include <semaphore>
typedef std::counting_semaphore<INT_MAX> Semaphore;
#else
// portability wrapper for sem_t since macOS does not implement it
#ifdef __APPLE__
@ -36,8 +43,8 @@ class Semaphore
~Semaphore()
{
// macOS cannot destroy a semaphore with posts less than the initializer
for(int i = 0; i < get_value(); ++i){
post();
for(int i = 0; i < value; ++i){
release();
}
dispatch_release(sem);
}
@ -46,8 +53,8 @@ class Semaphore
Semaphore& operator=(const Semaphore&) = delete;
Semaphore& operator=(Semaphore&&) = delete;
void wait() { dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER); }
bool try_wait()
void acquire() { dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER); }
bool try_acquire()
{
if(0 == dispatch_semaphore_wait(sem, DISPATCH_TIME_NOW)){
return true;
@ -55,32 +62,37 @@ class Semaphore
return false;
}
}
void post() { dispatch_semaphore_signal(sem); }
int get_value() const { return value; }
void release() { dispatch_semaphore_signal(sem); }
private:
const int value;
int value;
dispatch_semaphore_t sem;
};
#else
#include <errno.h>
#include <cerrno>
#include <semaphore.h>
class Semaphore
{
public:
explicit Semaphore(int value) : value(value) { sem_init(&mutex, 0, value); }
explicit Semaphore(int value) { sem_init(&mutex, 0, value); }
~Semaphore() { sem_destroy(&mutex); }
void wait()
Semaphore(const Semaphore&) = delete;
Semaphore(Semaphore&&) = delete;
Semaphore& operator=(const Semaphore&) = delete;
Semaphore& operator=(Semaphore&&) = delete;
void acquire()
{
int r;
do {
r = sem_wait(&mutex);
} while (r == -1 && errno == EINTR);
}
bool try_wait()
bool try_acquire()
{
int result;
do{
@ -89,16 +101,17 @@ class Semaphore
return (0 == result);
}
void post() { sem_post(&mutex); }
int get_value() const { return value; }
void release() { sem_post(&mutex); }
private:
const int value;
sem_t mutex;
};
#endif
#endif
#endif // S3FS_SEMAPHORE_H_
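With the rename, both the C++20 std::counting_semaphore path and the fallback classes expose the same acquire()/try_acquire()/release() surface, so callers are written once against that interface. A minimal usage sketch (the upload function is hypothetical):

// Limit concurrent part uploads to 4, whichever Semaphore backend
// (std::counting_semaphore, GCD, or sem_t) was picked at compile time.
Semaphore upload_slots(4);

void upload_one_part(/* part description */)
{
    upload_slots.acquire();        // block until a slot is free
    // ... perform the upload ...
    upload_slots.release();        // hand the slot back (a scope guard would also cover early returns)
}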
/*

File diff suppressed because it is too large

View File

@ -33,53 +33,6 @@
} \
}while(0)
// [NOTE]
// s3fs use many small allocated chunk in heap area for stats
// cache and parsing xml, etc. The OS may decide that giving
// this little memory back to the kernel will cause too much
// overhead and delay the operation.
// Address of gratitude, this workaround quotes a document of
// libxml2.( http://xmlsoft.org/xmlmem.html )
//
// When valgrind is used to test memory leak of s3fs, a large
// amount of chunk may be reported. You can check the memory
// release accurately by defining the S3FS_MALLOC_TRIM flag
// and building it. Also, when executing s3fs, you can define
// the MMAP_THRESHOLD environment variable and check more
// accurate memory leak.( see, man 3 free )
//
#ifdef S3FS_MALLOC_TRIM
#ifdef HAVE_MALLOC_TRIM
#include <malloc.h>
#define S3FS_MALLOCTRIM(pad) malloc_trim(pad)
#else // HAVE_MALLOC_TRIM
#define S3FS_MALLOCTRIM(pad)
#endif // HAVE_MALLOC_TRIM
#else // S3FS_MALLOC_TRIM
#define S3FS_MALLOCTRIM(pad)
#endif // S3FS_MALLOC_TRIM
#define S3FS_XMLFREEDOC(doc) \
do{ \
xmlFreeDoc(doc); \
S3FS_MALLOCTRIM(0); \
}while(0)
#define S3FS_XMLFREE(ptr) \
do{ \
xmlFree(ptr); \
S3FS_MALLOCTRIM(0); \
}while(0)
#define S3FS_XMLXPATHFREECONTEXT(ctx) \
do{ \
xmlXPathFreeContext(ctx); \
S3FS_MALLOCTRIM(0); \
}while(0)
#define S3FS_XMLXPATHFREEOBJECT(obj) \
do{ \
xmlXPathFreeObject(obj); \
S3FS_MALLOCTRIM(0); \
}while(0)
#endif // S3FS_S3FS_H_
/*

View File

@ -22,10 +22,11 @@
#include <unistd.h>
#include <pwd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <dlfcn.h>
#include <fstream>
#include <mutex>
#include <sstream>
#include <string>
#include "common.h"
#include "s3fs_cred.h"
@ -34,6 +35,10 @@
#include "curl.h"
#include "string_util.h"
#include "metaheader.h"
#include "threadpoolman.h"
#include "s3fs_threadreqs.h"
using namespace std::string_literals;
//-------------------------------------------------------------------
// Symbols
@ -48,7 +53,7 @@ static constexpr char DEFAULT_AWS_PROFILE_NAME[] = "default";
//
// detail=false ex. "Custom AWS Credential Library - v1.0.0"
// detail=true ex. "Custom AWS Credential Library - v1.0.0
// s3fs-fuse credential I/F library for S3 compatible strage X.
// s3fs-fuse credential I/F library for S3 compatible storage X.
// Copyright(C) 2022 Foo"
//
const char* VersionS3fsCredential(bool detail)
@ -88,9 +93,9 @@ bool FreeS3fsCredential(char** pperrstr)
return true;
}
bool UpdateS3fsCredential(char** ppaccess_key_id, char** ppserect_access_key, char** ppaccess_token, long long* ptoken_expire, char** pperrstr)
bool UpdateS3fsCredential(char** ppaccess_key_id, char** ppsecret_access_key, char** ppaccess_token, long long* ptoken_expire, char** pperrstr)
{
S3FS_PRN_INFO("Parameters : ppaccess_key_id=%p, ppserect_access_key=%p, ppaccess_token=%p, ptoken_expire=%p", ppaccess_key_id, ppserect_access_key, ppaccess_token, ptoken_expire);
S3FS_PRN_INFO("Parameters : ppaccess_key_id=%p, ppsecret_access_key=%p, ppaccess_token=%p, ptoken_expire=%p", ppaccess_key_id, ppsecret_access_key, ppaccess_token, ptoken_expire);
if(pperrstr){
*pperrstr = strdup("Check why built-in function was called, the external credential library must have UpdateS3fsCredential function.");
@ -101,8 +106,8 @@ bool UpdateS3fsCredential(char** ppaccess_key_id, char** ppserect_access_key, ch
if(ppaccess_key_id){
*ppaccess_key_id = nullptr;
}
if(ppserect_access_key){
*ppserect_access_key = nullptr;
if(ppsecret_access_key){
*ppsecret_access_key = nullptr;
}
if(ppaccess_token){
*ppaccess_token = nullptr;
@ -132,9 +137,9 @@ std::string S3fsCred::bucket_name;
//-------------------------------------------------------------------
// Class Methods
//-------------------------------------------------------------------
bool S3fsCred::SetBucket(const char* bucket)
bool S3fsCred::SetBucket(const std::string& bucket)
{
if(!bucket || strlen(bucket) == 0){
if(bucket.empty()){
return false;
}
S3fsCred::bucket_name = bucket;
@ -169,7 +174,6 @@ bool S3fsCred::ParseIAMRoleFromMetaDataResponse(const char* response, std::strin
// Methods : Constructor / Destructor
//-------------------------------------------------------------------
S3fsCred::S3fsCred() :
is_lock_init(false),
aws_profile(DEFAULT_AWS_PROFILE_NAME),
load_iamrole(false),
AWSAccessTokenExpire(0),
@ -188,31 +192,11 @@ S3fsCred::S3fsCred() :
pFuncCredFree(FreeS3fsCredential),
pFuncCredUpdate(UpdateS3fsCredential)
{
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
#if S3FS_PTHREAD_ERRORCHECK
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
int result;
if(0 != (result = pthread_mutex_init(&token_lock, &attr))){
S3FS_PRN_CRIT("failed to init token_lock: %d", result);
abort();
}
is_lock_init = true;
}
S3fsCred::~S3fsCred()
{
UnloadExtCredLib();
if(is_lock_init){
int result;
if(0 != (result = pthread_mutex_destroy(&token_lock))){
S3FS_PRN_CRIT("failed to destroy token_lock: %d", result);
abort();
}
is_lock_init = false;
}
}
//-------------------------------------------------------------------
@ -250,10 +234,8 @@ bool S3fsCred::SetIAMRoleMetadataType(bool flag)
return old;
}
bool S3fsCred::SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey, AutoLock::Type type)
bool S3fsCred::SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey)
{
AutoLock auto_lock(&token_lock, type);
if((!is_ibm_iam_auth && (!AccessKeyId || '\0' == AccessKeyId[0])) || !SecretAccessKey || '\0' == SecretAccessKey[0]){
return false;
}
@ -263,13 +245,11 @@ bool S3fsCred::SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey
return true;
}
bool S3fsCred::SetAccessKeyWithSessionToken(const char* AccessKeyId, const char* SecretAccessKey, const char * SessionToken, AutoLock::Type type)
bool S3fsCred::SetAccessKeyWithSessionToken(const char* AccessKeyId, const char* SecretAccessKey, const char * SessionToken)
{
AutoLock auto_lock(&token_lock, type);
bool access_key_is_empty = !AccessKeyId || '\0' == AccessKeyId[0];
bool secret_access_key_is_empty = !SecretAccessKey || '\0' == SecretAccessKey[0];
bool session_token_is_empty = !SessionToken || '\0' == SessionToken[0];
bool access_key_is_empty = AccessKeyId == nullptr || '\0' == AccessKeyId[0];
bool secret_access_key_is_empty = SecretAccessKey == nullptr || '\0' == SecretAccessKey[0];
bool session_token_is_empty = SessionToken == nullptr || '\0' == SessionToken[0];
if((!is_ibm_iam_auth && access_key_is_empty) || secret_access_key_is_empty || session_token_is_empty){
return false;
@ -282,11 +262,9 @@ bool S3fsCred::SetAccessKeyWithSessionToken(const char* AccessKeyId, const char*
return true;
}
bool S3fsCred::IsSetAccessKeys(AutoLock::Type type) const
bool S3fsCred::IsSetAccessKeys() const
{
AutoLock auto_lock(&token_lock, type);
return IsSetIAMRole(AutoLock::ALREADY_LOCKED) || ((!AWSAccessKeyId.empty() || is_ibm_iam_auth) && !AWSSecretAccessKey.empty());
return IsSetIAMRole() || ((!AWSAccessKeyId.empty() || is_ibm_iam_auth) && !AWSSecretAccessKey.empty());
}
bool S3fsCred::SetIsECS(bool flag)
@ -310,25 +288,19 @@ bool S3fsCred::SetIsIBMIAMAuth(bool flag)
return old;
}
bool S3fsCred::SetIAMRole(const char* role, AutoLock::Type type)
bool S3fsCred::SetIAMRole(const char* role)
{
AutoLock auto_lock(&token_lock, type);
IAM_role = role ? role : "";
return true;
}
std::string S3fsCred::GetIAMRole(AutoLock::Type type) const
const std::string& S3fsCred::GetIAMRoleHasLock() const
{
AutoLock auto_lock(&token_lock, type);
return IAM_role;
}
bool S3fsCred::IsSetIAMRole(AutoLock::Type type) const
bool S3fsCred::IsSetIAMRole() const
{
AutoLock auto_lock(&token_lock, type);
return !IAM_role.empty();
}
@ -360,15 +332,15 @@ std::string S3fsCred::SetIAMExpiryField(const char* expiry_field)
return old;
}
bool S3fsCred::GetIAMCredentialsURL(std::string& url, bool check_iam_role, AutoLock::Type type)
bool S3fsCred::GetIAMCredentialsURL(std::string& url, bool check_iam_role)
{
// check
if(check_iam_role && !is_ecs && !IsIBMIAMAuth()){
if(!IsSetIAMRole(type)) {
if(!IsSetIAMRole()) {
S3FS_PRN_ERR("IAM role name is empty.");
return false;
}
S3FS_PRN_INFO3("[IAM role=%s]", GetIAMRole(type).c_str());
S3FS_PRN_INFO3("[IAM role=%s]", GetIAMRoleHasLock().c_str());
}
if(is_ecs){
@ -387,18 +359,16 @@ bool S3fsCred::GetIAMCredentialsURL(std::string& url, bool check_iam_role, AutoL
// To avoid deadlocking, do not manipulate the S3fsCred object
// in the S3fsCurl::GetIAMv2ApiToken method (when retrying).
//
AutoLock auto_lock(&token_lock, type); // Lock for IAM_api_version, IAMv2_api_token
if(GetIMDSVersion(AutoLock::ALREADY_LOCKED) > 1){
S3fsCurl s3fscurl;
if(GetIMDSVersion() > 1){
std::string token;
int result = s3fscurl.GetIAMv2ApiToken(S3fsCred::IAMv2_token_url, S3fsCred::IAMv2_token_ttl, S3fsCred::IAMv2_token_ttl_hdr, token);
int result = get_iamv2api_token_request(S3fsCred::IAMv2_token_url, S3fsCred::IAMv2_token_ttl, S3fsCred::IAMv2_token_ttl_hdr, token);
if(-ENOENT == result){
// If we get a 404 back when requesting the token service,
// then it's highly likely we're running in an environment
// that doesn't support the AWS IMDSv2 API, so we'll skip
// the token retrieval in the future.
SetIMDSVersion(1, AutoLock::ALREADY_LOCKED);
SetIMDSVersionHasLock(1);
}else if(result != 0){
// If we get an unexpected error when retrieving the API
@ -409,13 +379,13 @@ bool S3fsCred::GetIAMCredentialsURL(std::string& url, bool check_iam_role, AutoL
}else{
// Set token
if(!SetIAMv2APIToken(token, AutoLock::ALREADY_LOCKED)){
if(!SetIAMv2APITokenHasLock(token)){
S3FS_PRN_ERR("Error storing IMDSv2 API token(%s).", token.c_str());
}
}
}
if(check_iam_role){
url = IAM_cred_url + GetIAMRole(AutoLock::ALREADY_LOCKED);
url = IAM_cred_url + GetIAMRoleHasLock();
}else{
url = IAM_cred_url;
}
@ -423,28 +393,22 @@ bool S3fsCred::GetIAMCredentialsURL(std::string& url, bool check_iam_role, AutoL
return true;
}
int S3fsCred::SetIMDSVersion(int version, AutoLock::Type type)
int S3fsCred::SetIMDSVersionHasLock(int version)
{
AutoLock auto_lock(&token_lock, type);
int old = IAM_api_version;
IAM_api_version = version;
return old;
}
int S3fsCred::GetIMDSVersion(AutoLock::Type type) const
int S3fsCred::GetIMDSVersion() const
{
AutoLock auto_lock(&token_lock, type);
return IAM_api_version;
}
bool S3fsCred::SetIAMv2APIToken(const std::string& token, AutoLock::Type type)
bool S3fsCred::SetIAMv2APITokenHasLock(const std::string& token)
{
S3FS_PRN_INFO3("Setting AWS IMDSv2 API token to %s", token.c_str());
AutoLock auto_lock(&token_lock, type);
if(token.empty()){
return false;
}
@ -452,10 +416,8 @@ bool S3fsCred::SetIAMv2APIToken(const std::string& token, AutoLock::Type type)
return true;
}
std::string S3fsCred::GetIAMv2APIToken(AutoLock::Type type) const
const std::string& S3fsCred::GetIAMv2APIToken() const
{
AutoLock auto_lock(&token_lock, type);
return IAMv2_api_token;
}
@ -467,38 +429,33 @@ std::string S3fsCred::GetIAMv2APIToken(AutoLock::Type type) const
// retry logic.
// Be careful not to deadlock whenever you change this logic.
//
bool S3fsCred::LoadIAMCredentials(AutoLock::Type type)
bool S3fsCred::LoadIAMCredentials()
{
// url(check iam role)
std::string url;
std::string striamtoken;
std::string stribmsecret;
std::string cred;
AutoLock auto_lock(&token_lock, type);
if(!GetIAMCredentialsURL(url, true, AutoLock::ALREADY_LOCKED)){
// get parameters(check iam role)
if(!GetIAMCredentialsURL(url, true)){
return false;
}
const char* iam_v2_token = nullptr;
std::string str_iam_v2_token;
if(GetIMDSVersion(AutoLock::ALREADY_LOCKED) > 1){
str_iam_v2_token = GetIAMv2APIToken(AutoLock::ALREADY_LOCKED);
iam_v2_token = str_iam_v2_token.c_str();
if(GetIMDSVersion() > 1){
striamtoken = GetIAMv2APIToken();
}
const char* ibm_secret_access_key = nullptr;
std::string str_ibm_secret_access_key;
if(IsIBMIAMAuth()){
str_ibm_secret_access_key = AWSSecretAccessKey;
ibm_secret_access_key = str_ibm_secret_access_key.c_str();
stribmsecret = AWSSecretAccessKey;
}
S3fsCurl s3fscurl;
std::string response;
if(!s3fscurl.GetIAMCredentials(url.c_str(), iam_v2_token, ibm_secret_access_key, response)){
// Get IAM Credentials
if(0 == get_iamcred_request(url, striamtoken, stribmsecret, cred)){
S3FS_PRN_DBG("Succeed to set IAM credentials");
}else{
S3FS_PRN_ERR("Something error occurred, could not set IAM credentials.");
return false;
}
if(!SetIAMCredentials(response.c_str(), AutoLock::ALREADY_LOCKED)){
if(!SetIAMCredentials(cred.c_str())){
S3FS_PRN_ERR("Something error occurred, could not set IAM role name.");
return false;
}
@ -510,39 +467,42 @@ bool S3fsCred::LoadIAMCredentials(AutoLock::Type type)
//
bool S3fsCred::LoadIAMRoleFromMetaData()
{
AutoLock auto_lock(&token_lock);
if(!load_iamrole){
// nothing to do
return true;
}
std::string url;
std::string iamtoken;
{
const std::lock_guard<std::mutex> lock(token_lock);
if(load_iamrole){
// url(not check iam role)
std::string url;
if(!GetIAMCredentialsURL(url, false, AutoLock::ALREADY_LOCKED)){
if(!GetIAMCredentialsURL(url, false)){
return false;
}
const char* iam_v2_token = nullptr;
std::string str_iam_v2_token;
if(GetIMDSVersion(AutoLock::ALREADY_LOCKED) > 1){
str_iam_v2_token = GetIAMv2APIToken(AutoLock::ALREADY_LOCKED);
iam_v2_token = str_iam_v2_token.c_str();
if(GetIMDSVersion() > 1){
iamtoken = GetIAMv2APIToken();
}
}
S3fsCurl s3fscurl;
std::string token;
if(!s3fscurl.GetIAMRoleFromMetaData(url.c_str(), iam_v2_token, token)){
return false;
}
// Get IAM Role token
std::string token;
if(0 != get_iamrole_request(url, iamtoken, token)){
S3FS_PRN_ERR("failed to get IAM Role token from meta data.");
return false;
}
if(!SetIAMRoleFromMetaData(token.c_str(), AutoLock::ALREADY_LOCKED)){
S3FS_PRN_ERR("Something error occurred, could not set IAM role name.");
return false;
}
S3FS_PRN_INFO("loaded IAM role name = %s", GetIAMRole(AutoLock::ALREADY_LOCKED).c_str());
// Set
if(!SetIAMRoleFromMetaData(token.c_str())){
S3FS_PRN_ERR("Something error occurred, could not set IAM role name.");
return false;
}
return true;
}
bool S3fsCred::SetIAMCredentials(const char* response, AutoLock::Type type)
bool S3fsCred::SetIAMCredentials(const char* response)
{
S3FS_PRN_INFO3("IAM credential response = \"%s\"", response);
@ -556,26 +516,39 @@ bool S3fsCred::SetIAMCredentials(const char* response, AutoLock::Type type)
return false;
}
AutoLock auto_lock(&token_lock, type);
AWSAccessToken = keyval[IAM_token_field];
auto aws_access_token = keyval.find(IAM_token_field);
if(aws_access_token == keyval.end()){
return false;
}
if(is_ibm_iam_auth){
auto access_token_expire = keyval.find(IAM_expiry_field);
off_t tmp_expire = 0;
if(!s3fs_strtoofft(&tmp_expire, keyval[IAM_expiry_field].c_str(), /*base=*/ 10)){
if(access_token_expire == keyval.end() || !s3fs_strtoofft(&tmp_expire, access_token_expire->second.c_str(), /*base=*/ 10)){
return false;
}
AWSAccessTokenExpire = static_cast<time_t>(tmp_expire);
}else{
AWSAccessKeyId = keyval[S3fsCred::IAMCRED_ACCESSKEYID];
AWSSecretAccessKey = keyval[S3fsCred::IAMCRED_SECRETACCESSKEY];
AWSAccessTokenExpire = cvtIAMExpireStringToTime(keyval[IAM_expiry_field].c_str());
auto access_key_id = keyval.find(S3fsCred::IAMCRED_ACCESSKEYID);
auto secret_access_key = keyval.find(S3fsCred::IAMCRED_SECRETACCESSKEY);
auto access_token_expire = keyval.find(IAM_expiry_field);
if(access_key_id == keyval.end() || secret_access_key == keyval.end() || access_token_expire == keyval.end()){
return false;
}
AWSAccessKeyId = access_key_id->second;
AWSSecretAccessKey = secret_access_key->second;
AWSAccessTokenExpire = cvtIAMExpireStringToTime(access_token_expire->second.c_str());
}
AWSAccessToken = aws_access_token->second;
return true;
}
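SetIAMCredentials() now looks fields up with find() rather than operator[]: on a std::map, operator[] silently inserts an empty value for a missing key, which both hides the error and mutates the map. A minimal sketch of the pattern:

#include <map>
#include <string>

// Returns false (without modifying the map) when the field is absent,
// instead of materializing an empty entry as operator[] would.
static bool get_field(const std::map<std::string, std::string>& kv, const std::string& key, std::string& value)
{
    auto it = kv.find(key);
    if(it == kv.cend()){
        return false;
    }
    value = it->second;
    return true;
}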
bool S3fsCred::SetIAMRoleFromMetaData(const char* response, AutoLock::Type type)
bool S3fsCred::SetIAMRoleFromMetaData(const char* response)
{
const std::lock_guard<std::mutex> lock(token_lock);
S3FS_PRN_INFO3("IAM role name response = \"%s\"", response ? response : "(null)");
std::string rolename;
@ -583,7 +556,7 @@ bool S3fsCred::SetIAMRoleFromMetaData(const char* response, AutoLock::Type type)
return false;
}
SetIAMRole(rolename.c_str(), type);
SetIAMRole(rolename.c_str());
return true;
}
@ -685,7 +658,6 @@ bool S3fsCred::ParseS3fsPasswdFile(bucketkvmap_t& resmap)
std::string line;
size_t first_pos;
readline_t linelist;
readline_t::iterator iter;
// open passwd file
std::ifstream PF(passwd_file.c_str());
@ -716,7 +688,7 @@ bool S3fsCred::ParseS3fsPasswdFile(bucketkvmap_t& resmap)
// read '=' type
kvmap_t kv;
for(iter = linelist.begin(); iter != linelist.end(); ++iter){
for(auto iter = linelist.cbegin(); iter != linelist.cend(); ++iter){
first_pos = iter->find_first_of('=');
if(first_pos == std::string::npos){
continue;
@ -727,7 +699,7 @@ bool S3fsCred::ParseS3fsPasswdFile(bucketkvmap_t& resmap)
if(key.empty()){
continue;
}
if(kv.end() != kv.find(key)){
if(kv.cend() != kv.find(key)){
S3FS_PRN_WARN("same key name(%s) found in passwd file, skip this.", key.c_str());
continue;
}
@ -737,7 +709,7 @@ bool S3fsCred::ParseS3fsPasswdFile(bucketkvmap_t& resmap)
resmap[S3fsCred::KEYVAL_FIELDS_TYPE] = kv;
// read ':' type
for(iter = linelist.begin(); iter != linelist.end(); ++iter){
for(auto iter = linelist.cbegin(); iter != linelist.cend(); ++iter){
first_pos = iter->find_first_of(':');
size_t last_pos = iter->find_last_of(':');
if(first_pos == std::string::npos){
@ -757,7 +729,7 @@ bool S3fsCred::ParseS3fsPasswdFile(bucketkvmap_t& resmap)
accesskey = trim(iter->substr(0, first_pos));
secret = trim(iter->substr(first_pos + 1, std::string::npos));
}
if(resmap.end() != resmap.find(bucketname)){
if(resmap.cend() != resmap.find(bucketname)){
S3FS_PRN_EXIT("there are multiple entries for the same bucket(%s) in the passwd file.", (bucketname.empty() ? "default" : bucketname.c_str()));
return false;
}
@ -785,7 +757,7 @@ bool S3fsCred::ParseS3fsPasswdFile(bucketkvmap_t& resmap)
//
// only one default key pair is allowed, but not required
//
bool S3fsCred::ReadS3fsPasswdFile(AutoLock::Type type)
bool S3fsCred::ReadS3fsPasswdFile()
{
bucketkvmap_t bucketmap;
kvmap_t keyval;
@ -807,8 +779,8 @@ bool S3fsCred::ReadS3fsPasswdFile(AutoLock::Type type)
//
// check key=value type format.
//
bucketkvmap_t::iterator it = bucketmap.find(S3fsCred::KEYVAL_FIELDS_TYPE);
if(bucketmap.end() != it){
auto it = bucketmap.find(S3fsCred::KEYVAL_FIELDS_TYPE);
if(bucketmap.cend() != it){
// aws format
std::string access_key_id;
std::string secret_access_key;
@ -816,8 +788,8 @@ bool S3fsCred::ReadS3fsPasswdFile(AutoLock::Type type)
if(-1 == result){
return false;
}else if(1 == result){
// found ascess(secret) keys
if(!SetAccessKey(access_key_id.c_str(), secret_access_key.c_str(), type)){
// found access(secret) keys
if(!SetAccessKey(access_key_id.c_str(), secret_access_key.c_str())){
S3FS_PRN_EXIT("failed to set access key/secret key.");
return false;
}
@ -826,24 +798,24 @@ bool S3fsCred::ReadS3fsPasswdFile(AutoLock::Type type)
}
std::string bucket_key = S3fsCred::ALLBUCKET_FIELDS_TYPE;
if(!S3fsCred::bucket_name.empty() && bucketmap.end() != bucketmap.find(S3fsCred::bucket_name)){
if(!S3fsCred::bucket_name.empty() && bucketmap.cend() != bucketmap.find(S3fsCred::bucket_name)){
bucket_key = S3fsCred::bucket_name;
}
it = bucketmap.find(bucket_key);
if(bucketmap.end() == it){
if(bucketmap.cend() == it){
S3FS_PRN_EXIT("Not found access key/secret key in passwd file.");
return false;
}
keyval = it->second;
kvmap_t::iterator aws_accesskeyid_it = keyval.find(S3fsCred::AWS_ACCESSKEYID);
kvmap_t::iterator aws_secretkey_it = keyval.find(S3fsCred::AWS_SECRETKEY);
if(keyval.end() == aws_accesskeyid_it || keyval.end() == aws_secretkey_it){
auto aws_accesskeyid_it = keyval.find(S3fsCred::AWS_ACCESSKEYID);
auto aws_secretkey_it = keyval.find(S3fsCred::AWS_SECRETKEY);
if(keyval.cend() == aws_accesskeyid_it || keyval.end() == aws_secretkey_it){
S3FS_PRN_EXIT("Not found access key/secret key in passwd file.");
return false;
}
if(!SetAccessKey(aws_accesskeyid_it->second.c_str(), aws_secretkey_it->second.c_str(), type)){
if(!SetAccessKey(aws_accesskeyid_it->second.c_str(), aws_secretkey_it->second.c_str())){
S3FS_PRN_EXIT("failed to set internal data for access key/secret key from passwd file.");
return false;
}
@ -863,12 +835,12 @@ int S3fsCred::CheckS3fsCredentialAwsFormat(const kvmap_t& kvmap, std::string& ac
if(kvmap.empty()){
return 0;
}
kvmap_t::const_iterator str1_it = kvmap.find(str1);
kvmap_t::const_iterator str2_it = kvmap.find(str2);
if(kvmap.end() == str1_it && kvmap.end() == str2_it){
auto str1_it = kvmap.find(str1);
auto str2_it = kvmap.find(str2);
if(kvmap.cend() == str1_it && kvmap.end() == str2_it){
return 0;
}
if(kvmap.end() == str1_it || kvmap.end() == str2_it){
if(kvmap.cend() == str1_it || kvmap.end() == str2_it){
S3FS_PRN_EXIT("AWSAccesskey or AWSSecretkey is not specified.");
return -1;
}
@ -881,7 +853,7 @@ int S3fsCred::CheckS3fsCredentialAwsFormat(const kvmap_t& kvmap, std::string& ac
//
// Read Aws Credential File
//
bool S3fsCred::ReadAwsCredentialFile(const std::string &filename, AutoLock::Type type)
bool S3fsCred::ReadAwsCredentialFile(const std::string &filename)
{
// open passwd file
std::ifstream PF(filename.c_str());
@ -938,12 +910,12 @@ bool S3fsCred::ReadAwsCredentialFile(const std::string &filename, AutoLock::Type
S3FS_PRN_EXIT("AWS session token was expected but wasn't provided in aws/credentials file for profile: %s.", aws_profile.c_str());
return false;
}
if(!SetAccessKey(accesskey.c_str(), secret.c_str(), type)){
if(!SetAccessKey(accesskey.c_str(), secret.c_str())){
S3FS_PRN_EXIT("failed to set internal data for access key/secret key from aws credential file.");
return false;
}
}else{
if(!SetAccessKeyWithSessionToken(accesskey.c_str(), secret.c_str(), session_token.c_str(), type)){
if(!SetAccessKeyWithSessionToken(accesskey.c_str(), secret.c_str(), session_token.c_str())){
S3FS_PRN_EXIT("session token is invalid.");
return false;
}
@ -981,13 +953,13 @@ bool S3fsCred::InitialS3fsCredentials()
}
// 1 - keys specified on the command line
if(IsSetAccessKeys(AutoLock::NONE)){
if(IsSetAccessKeys()){
return true;
}
// 2 - was specified on the command line
if(IsSetPasswdFile()){
if(!ReadS3fsPasswdFile(AutoLock::NONE)){
if(!ReadS3fsPasswdFile()){
return false;
}
return true;
@ -1007,7 +979,7 @@ bool S3fsCred::InitialS3fsCredentials()
S3FS_PRN_INFO2("access key from env variables");
if(AWSSESSIONTOKEN != nullptr){
S3FS_PRN_INFO2("session token is available");
if(!SetAccessKeyWithSessionToken(AWSACCESSKEYID, AWSSECRETACCESSKEY, AWSSESSIONTOKEN, AutoLock::NONE)){
if(!SetAccessKeyWithSessionToken(AWSACCESSKEYID, AWSSECRETACCESSKEY, AWSSESSIONTOKEN)){
S3FS_PRN_EXIT("session token is invalid.");
return false;
}
@ -1018,7 +990,7 @@ bool S3fsCred::InitialS3fsCredentials()
return false;
}
}
if(!SetAccessKey(AWSACCESSKEYID, AWSSECRETACCESSKEY, AutoLock::NONE)){
if(!SetAccessKey(AWSACCESSKEYID, AWSSECRETACCESSKEY)){
S3FS_PRN_EXIT("if one access key is specified, both keys need to be specified.");
return false;
}
@ -1034,7 +1006,7 @@ bool S3fsCred::InitialS3fsCredentials()
S3FS_PRN_EXIT("AWS_CREDENTIAL_FILE: \"%s\" is not readable.", passwd_file.c_str());
return false;
}
if(!ReadS3fsPasswdFile(AutoLock::NONE)){
if(!ReadS3fsPasswdFile()){
return false;
}
return true;
@ -1042,8 +1014,8 @@ bool S3fsCred::InitialS3fsCredentials()
}
// 3b - check ${HOME}/.aws/credentials
std::string aws_credentials = std::string(getpwuid(getuid())->pw_dir) + "/.aws/credentials";
if(ReadAwsCredentialFile(aws_credentials, AutoLock::NONE)){
std::string aws_credentials = getpwuid(getuid())->pw_dir + "/.aws/credentials"s;
if(ReadAwsCredentialFile(aws_credentials)){
return true;
}else if(aws_profile != DEFAULT_AWS_PROFILE_NAME){
S3FS_PRN_EXIT("Could not find profile: %s in file: %s", aws_profile.c_str(), aws_credentials.c_str());
@ -1056,14 +1028,14 @@ bool S3fsCred::InitialS3fsCredentials()
passwd_file = HOME;
passwd_file += "/.passwd-s3fs";
if(IsReadableS3fsPasswdFile()){
if(!ReadS3fsPasswdFile(AutoLock::NONE)){
if(!ReadS3fsPasswdFile()){
return false;
}
// It is possible that the user's file was there but
// contained no key pairs i.e. commented out
// in that case, go look in the final location
if(IsSetAccessKeys(AutoLock::NONE)){
if(IsSetAccessKeys()){
return true;
}
}
@ -1072,7 +1044,7 @@ bool S3fsCred::InitialS3fsCredentials()
// 5 - from the system default location
passwd_file = "/etc/passwd-s3fs";
if(IsReadableS3fsPasswdFile()){
if(!ReadS3fsPasswdFile(AutoLock::NONE)){
if(!ReadS3fsPasswdFile()){
return false;
}
return true;
@ -1085,7 +1057,7 @@ bool S3fsCred::InitialS3fsCredentials()
//-------------------------------------------------------------------
// Methods : for IAM
//-------------------------------------------------------------------
bool S3fsCred::ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval)
bool S3fsCred::ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval) const
{
if(!response){
return false;
@ -1142,20 +1114,20 @@ bool S3fsCred::ParseIAMCredentialResponse(const char* response, iamcredmap_t& ke
bool S3fsCred::CheckIAMCredentialUpdate(std::string* access_key_id, std::string* secret_access_key, std::string* access_token)
{
AutoLock auto_lock(&token_lock);
const std::lock_guard<std::mutex> lock(token_lock);
if(IsIBMIAMAuth() || IsSetExtCredLib() || is_ecs || IsSetIAMRole(AutoLock::ALREADY_LOCKED)){
if(AWSAccessTokenExpire < (time(nullptr) + S3fsCred::IAM_EXPIRE_MERGIN)){
if(IsIBMIAMAuth() || IsSetExtCredLib() || is_ecs || IsSetIAMRole()){
if(AWSAccessTokenExpire < (time(nullptr) + S3fsCred::IAM_EXPIRE_MERGING)){
S3FS_PRN_INFO("IAM Access Token refreshing...");
// update
if(!IsSetExtCredLib()){
if(!LoadIAMCredentials(AutoLock::ALREADY_LOCKED)){
if(!LoadIAMCredentials()){
S3FS_PRN_ERR("Access Token refresh by built-in failed");
return false;
}
}else{
if(!UpdateExtCredentials(AutoLock::ALREADY_LOCKED)){
if(!UpdateExtCredentials()){
S3FS_PRN_ERR("Access Token refresh by %s(external credential library) failed", credlib.c_str());
return false;
}
@ -1172,10 +1144,10 @@ bool S3fsCred::CheckIAMCredentialUpdate(std::string* access_key_id, std::string*
*secret_access_key = AWSSecretAccessKey;
}
if(access_token){
if(IsIBMIAMAuth() || IsSetExtCredLib() || is_ecs || is_use_session_token || IsSetIAMRole(AutoLock::ALREADY_LOCKED)){
if(IsIBMIAMAuth() || IsSetExtCredLib() || is_ecs || is_use_session_token || IsSetIAMRole()){
*access_token = AWSAccessToken;
}else{
access_token->erase();
access_token->clear();
}
}
@ -1334,36 +1306,34 @@ bool S3fsCred::UnloadExtCredLib()
return true;
}
bool S3fsCred::UpdateExtCredentials(AutoLock::Type type)
bool S3fsCred::UpdateExtCredentials()
{
if(!hExtCredLib){
S3FS_PRN_CRIT("External Credential Library is not loaded, why?");
return false;
}
AutoLock auto_lock(&token_lock, type);
char* paccess_key_id = nullptr;
char* pserect_access_key = nullptr;
char* psecret_access_key = nullptr;
char* paccess_token = nullptr;
char* perrstr = nullptr;
long long token_expire = 0;
bool result = (*pFuncCredUpdate)(&paccess_key_id, &pserect_access_key, &paccess_token, &token_expire, &perrstr);
bool result = (*pFuncCredUpdate)(&paccess_key_id, &psecret_access_key, &paccess_token, &token_expire, &perrstr);
if(!result){
// error occurred
S3FS_PRN_ERR("Could not update credential by \"UpdateS3fsCredential\" function : %s", perrstr ? perrstr : "unknown");
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownConditionTrueFalse
}else if(!paccess_key_id || !pserect_access_key || !paccess_token || token_expire <= 0){
}else if(!paccess_key_id || !psecret_access_key || !paccess_token || token_expire <= 0){
// some variables are wrong
S3FS_PRN_ERR("After updating credential by \"UpdateS3fsCredential\" function, but some variables are wrong : paccess_key_id=%p, pserect_access_key=%p, paccess_token=%p, token_expire=%lld", paccess_key_id, pserect_access_key, paccess_token, token_expire);
S3FS_PRN_ERR("After updating credential by \"UpdateS3fsCredential\" function, but some variables are wrong : paccess_key_id=%p, psecret_access_key=%p, paccess_token=%p, token_expire=%lld", paccess_key_id, psecret_access_key, paccess_token, token_expire);
result = false;
}else{
// succeed updating
AWSAccessKeyId = paccess_key_id;
AWSSecretAccessKey = pserect_access_key;
AWSSecretAccessKey = psecret_access_key;
AWSAccessToken = paccess_token;
AWSAccessTokenExpire = token_expire;
}
@ -1376,8 +1346,8 @@ bool S3fsCred::UpdateExtCredentials(AutoLock::Type type)
}
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownConditionTrueFalse
if(pserect_access_key){
free(pserect_access_key);
if(psecret_access_key){
free(psecret_access_key);
}
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownConditionTrueFalse
@ -1402,6 +1372,8 @@ bool S3fsCred::UpdateExtCredentials(AutoLock::Type type)
//
int S3fsCred::DetectParam(const char* arg)
{
const std::lock_guard<std::mutex> lock(token_lock);
if(!arg){
S3FS_PRN_EXIT("parameter arg is empty(null)");
return -1;
@ -1419,7 +1391,7 @@ int S3fsCred::DetectParam(const char* arg)
SetIAMTokenField("\"access_token\"");
SetIAMExpiryField("\"expiration\"");
SetIAMFieldCount(2);
SetIMDSVersion(1, AutoLock::NONE);
SetIMDSVersionHasLock(1);
set_builtin_cred_opts = true;
return 0;
}
@ -1439,14 +1411,14 @@ int S3fsCred::DetectParam(const char* arg)
S3FS_PRN_EXIT("option ibm_iam_endpoint has invalid format, missing http / https protocol");
return -1;
}
endpoint_url = std::string(iam_endpoint) + "/identity/token";
endpoint_url = iam_endpoint + "/identity/token"s;
SetIAMCredentialsURL(endpoint_url.c_str());
set_builtin_cred_opts = true;
return 0;
}
if(0 == strcmp(arg, "imdsv1only")){
SetIMDSVersion(1, AutoLock::NONE);
SetIMDSVersionHasLock(1);
set_builtin_cred_opts = true;
return 0;
}
@ -1457,7 +1429,7 @@ int S3fsCred::DetectParam(const char* arg)
return -1;
}
SetIsECS(true);
SetIMDSVersion(1, AutoLock::NONE);
SetIMDSVersionHasLock(1);
SetIAMCredentialsURL("http://169.254.170.2");
SetIAMFieldCount(5);
set_builtin_cred_opts = true;
@ -1478,7 +1450,7 @@ int S3fsCred::DetectParam(const char* arg)
}else if(is_prefix(arg, "iam_role=")){
const char* role = strchr(arg, '=') + sizeof(char);
SetIAMRole(role, AutoLock::NONE);
SetIAMRole(role);
SetIAMRoleMetadataType(false);
set_builtin_cred_opts = true;
return 0;
@ -1550,6 +1522,7 @@ bool S3fsCred::CheckForbiddenBucketParams()
//
bool S3fsCred::CheckAllParams()
{
const std::lock_guard<std::mutex> lock(token_lock);
//
// Checking forbidden parameters for bucket
//
@ -1558,12 +1531,12 @@ bool S3fsCred::CheckAllParams()
}
// error checking of command line arguments for compatibility
if(S3fsCurl::IsPublicBucket() && IsSetAccessKeys(AutoLock::NONE)){
if(S3fsCurl::IsPublicBucket() && IsSetAccessKeys()){
S3FS_PRN_EXIT("specifying both public_bucket and the access keys options is invalid.");
return false;
}
if(IsSetPasswdFile() && IsSetAccessKeys(AutoLock::NONE)){
if(IsSetPasswdFile() && IsSetAccessKeys()){
S3FS_PRN_EXIT("specifying both passwd_file and the access keys options is invalid.");
return false;
}
@ -1572,7 +1545,7 @@ bool S3fsCred::CheckAllParams()
if(!InitialS3fsCredentials()){
return false;
}
if(!IsSetAccessKeys(AutoLock::NONE)){
if(!IsSetAccessKeys()){
S3FS_PRN_EXIT("could not establish security credentials, check documentation.");
return false;
}
@ -1604,7 +1577,7 @@ bool S3fsCred::CheckAllParams()
// Load and Initialize external credential library
if(IsSetExtCredLib() || IsSetExtCredLibOpts()){
if(!IsSetExtCredLib()){
S3FS_PRN_EXIT("The \"credlib_opts\"(%s) is specifyed but \"credlib\" option is not specified.", credlib_opts.c_str());
S3FS_PRN_EXIT("The \"credlib_opts\"(%s) is specified but \"credlib\" option is not specified.", credlib_opts.c_str());
return false;
}

View File

@ -21,8 +21,13 @@
#ifndef S3FS_CRED_H_
#define S3FS_CRED_H_
#include "autolock.h"
#include <map>
#include <mutex>
#include <string>
#include "common.h"
#include "s3fs_extcred.h"
#include "types.h"
//----------------------------------------------
// Typedefs
@ -45,7 +50,7 @@ class S3fsCred
static constexpr char AWS_ACCESSKEYID[] = "AWSAccessKeyId";
static constexpr char AWS_SECRETKEY[] = "AWSSecretKey";
static constexpr int IAM_EXPIRE_MERGIN = 20 * 60; // update timing
static constexpr int IAM_EXPIRE_MERGING = 20 * 60; // update timing
static constexpr char ECS_IAM_ENV_VAR[] = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI";
static constexpr char IAMCRED_ACCESSKEYID[] = "AccessKeyId";
static constexpr char IAMCRED_SECRETACCESSKEY[] = "SecretAccessKey";
@ -53,30 +58,29 @@ class S3fsCred
static std::string bucket_name;
mutable pthread_mutex_t token_lock;
bool is_lock_init;
mutable std::mutex token_lock;
std::string passwd_file;
std::string aws_profile;
bool load_iamrole;
std::string AWSAccessKeyId; // Protect exclusively
std::string AWSSecretAccessKey; // Protect exclusively
std::string AWSAccessToken; // Protect exclusively
time_t AWSAccessTokenExpire; // Protect exclusively
std::string AWSAccessKeyId GUARDED_BY(token_lock);
std::string AWSSecretAccessKey GUARDED_BY(token_lock);
std::string AWSAccessToken GUARDED_BY(token_lock);
time_t AWSAccessTokenExpire GUARDED_BY(token_lock);
bool is_ecs;
bool is_use_session_token;
bool is_ibm_iam_auth;
std::string IAM_cred_url;
int IAM_api_version; // Protect exclusively
std::string IAMv2_api_token; // Protect exclusively
int IAM_api_version GUARDED_BY(token_lock);
std::string IAMv2_api_token GUARDED_BY(token_lock);
size_t IAM_field_count;
std::string IAM_token_field;
std::string IAM_expiry_field;
std::string IAM_role; // Protect exclusively
std::string IAM_role GUARDED_BY(token_lock);
bool set_builtin_cred_opts; // true if options other than "credlib" is set
std::string credlib; // credlib(name or path)
@ -102,24 +106,34 @@ class S3fsCred
bool SetAwsProfileName(const char* profile_name);
bool SetIAMRoleMetadataType(bool flag);
bool SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey, AutoLock::Type type);
bool SetAccessKeyWithSessionToken(const char* AccessKeyId, const char* SecretAccessKey, const char * SessionToken, AutoLock::Type type);
bool IsSetAccessKeys(AutoLock::Type type) const;
bool SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey) REQUIRES(S3fsCred::token_lock);
bool SetAccessKeyWithSessionToken(const char* AccessKeyId, const char* SecretAccessKey, const char * SessionToken) REQUIRES(S3fsCred::token_lock);
bool IsSetAccessKeys() const REQUIRES(S3fsCred::token_lock);
bool SetIsECS(bool flag);
bool SetIsUseSessionToken(bool flag);
bool SetIsIBMIAMAuth(bool flag);
int SetIMDSVersion(int version, AutoLock::Type type);
int GetIMDSVersion(AutoLock::Type type) const;
int SetIMDSVersionHasLock(int version) REQUIRES(S3fsCred::token_lock);
int SetIMDSVersion(int version)
{
const std::lock_guard<std::mutex> lock(token_lock);
return SetIMDSVersionHasLock(version);
}
int GetIMDSVersion() const REQUIRES(S3fsCred::token_lock);
bool SetIAMv2APIToken(const std::string& token, AutoLock::Type type);
std::string GetIAMv2APIToken(AutoLock::Type type) const;
bool SetIAMv2APITokenHasLock(const std::string& token) REQUIRES(S3fsCred::token_lock);
const std::string& GetIAMv2APIToken() const REQUIRES(S3fsCred::token_lock);
bool SetIAMRole(const char* role, AutoLock::Type type);
std::string GetIAMRole(AutoLock::Type type) const;
bool IsSetIAMRole(AutoLock::Type type) const;
bool SetIAMRole(const char* role) REQUIRES(S3fsCred::token_lock);
const std::string& GetIAMRoleHasLock() const REQUIRES(S3fsCred::token_lock);
const std::string& GetIAMRole() const
{
const std::lock_guard<std::mutex> lock(token_lock);
return GetIAMRoleHasLock();
}
bool IsSetIAMRole() const REQUIRES(S3fsCred::token_lock);
size_t SetIAMFieldCount(size_t field_count);
std::string SetIAMCredentialsURL(const char* url);
std::string SetIAMTokenField(const char* token_field);
@ -128,18 +142,18 @@ class S3fsCred
bool IsReadableS3fsPasswdFile() const;
bool CheckS3fsPasswdFilePerms();
bool ParseS3fsPasswdFile(bucketkvmap_t& resmap);
bool ReadS3fsPasswdFile(AutoLock::Type type);
bool ReadS3fsPasswdFile() REQUIRES(S3fsCred::token_lock);
static int CheckS3fsCredentialAwsFormat(const kvmap_t& kvmap, std::string& access_key_id, std::string& secret_access_key);
bool ReadAwsCredentialFile(const std::string &filename, AutoLock::Type type);
bool ReadAwsCredentialFile(const std::string &filename) REQUIRES(S3fsCred::token_lock);
bool InitialS3fsCredentials();
bool ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval);
bool InitialS3fsCredentials() REQUIRES(S3fsCred::token_lock);
bool ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval) const;
bool GetIAMCredentialsURL(std::string& url, bool check_iam_role, AutoLock::Type type);
bool LoadIAMCredentials(AutoLock::Type type);
bool SetIAMCredentials(const char* response, AutoLock::Type type);
bool SetIAMRoleFromMetaData(const char* response, AutoLock::Type type);
bool GetIAMCredentialsURL(std::string& url, bool check_iam_role) REQUIRES(S3fsCred::token_lock);
bool LoadIAMCredentials() REQUIRES(S3fsCred::token_lock);
bool SetIAMCredentials(const char* response) REQUIRES(S3fsCred::token_lock);
bool SetIAMRoleFromMetaData(const char* response);
bool SetExtCredLib(const char* arg);
bool IsSetExtCredLib() const;
@ -149,12 +163,12 @@ class S3fsCred
bool InitExtCredLib();
bool LoadExtCredLib();
bool UnloadExtCredLib();
bool UpdateExtCredentials(AutoLock::Type type);
bool UpdateExtCredentials() REQUIRES(S3fsCred::token_lock);
static bool CheckForbiddenBucketParams();
public:
static bool SetBucket(const char* bucket);
static bool SetBucket(const std::string& bucket);
static const std::string& GetBucket();
S3fsCred();
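The locking change above replaces the pthread mutex and AutoLock helper with a std::mutex plus Clang thread-safety annotations: members are tagged GUARDED_BY(token_lock), internal methods that assume the lock is already held carry a *HasLock name and REQUIRES(token_lock), and thin public wrappers take a std::lock_guard before delegating. A minimal standalone sketch of that pattern, using a hypothetical class and locally defined fallback macros (in s3fs the macros are provided by its own headers), could look like this:

#include <mutex>

// Stand-in macros; in s3fs these are defined elsewhere and expand to Clang's
// thread-safety attributes. On other compilers they compile away.
#if defined(__clang__)
#define GUARDED_BY(x)   __attribute__((guarded_by(x)))
#define REQUIRES(...)   __attribute__((requires_capability(__VA_ARGS__)))
#else
#define GUARDED_BY(x)
#define REQUIRES(...)
#endif

// Hypothetical example (not s3fs code) mirroring the
// SetIMDSVersion/SetIMDSVersionHasLock split shown above.
class TokenCache
{
    private:
        mutable std::mutex token_lock;
        int                version GUARDED_BY(token_lock) = 1;

        int GetVersionHasLock() const REQUIRES(token_lock)
        {
            return version;                     // caller already holds token_lock
        }

    public:
        int GetVersion() const
        {
            const std::lock_guard<std::mutex> lock(token_lock);
            return GetVersionHasLock();         // delegate while holding the lock
        }
};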

View File

@ -88,8 +88,8 @@ extern bool FreeS3fsCredential(char** pperrstr) S3FS_FUNCATTR_WEAK;
//
// char** ppaccess_key_id : Allocate and set "Access Key ID" string
// area to *ppaccess_key_id.
// char** ppserect_access_key : Allocate and set "Access Secret Key ID"
// string area to *ppserect_access_key.
// char** ppsecret_access_key : Allocate and set "Access Secret Key ID"
// string area to *ppsecret_access_key.
// char** ppaccess_token : Allocate and set "Token" string area to
// *ppaccess_token.
// long long* ptoken_expire : Set token expire time(time_t) value to
@ -105,7 +105,7 @@ extern bool FreeS3fsCredential(char** pperrstr) S3FS_FUNCATTR_WEAK;
// For all argument of the character string pointer(char **) set the
// allocated string area. The allocated area is freed by the caller.
//
extern bool UpdateS3fsCredential(char** ppaccess_key_id, char** ppserect_access_key, char** ppaccess_token, long long* ptoken_expire, char** pperrstr) S3FS_FUNCATTR_WEAK;
extern bool UpdateS3fsCredential(char** ppaccess_key_id, char** ppsecret_access_key, char** ppaccess_token, long long* ptoken_expire, char** pperrstr) S3FS_FUNCATTR_WEAK;
//---------------------------------------------------------
// Typedef Prototype function
@ -126,9 +126,9 @@ typedef bool (*fp_InitS3fsCredential)(const char* popts, char** pperrstr);
typedef bool (*fp_FreeS3fsCredential)(char** pperrstr);
//
// bool UpdateS3fsCredential(char** ppaccess_key_id, char** ppserect_access_key, char** ppaccess_token, long long* ptoken_expire, char** pperrstr)
// bool UpdateS3fsCredential(char** ppaccess_key_id, char** ppsecret_access_key, char** ppaccess_token, long long* ptoken_expire, char** pperrstr)
//
typedef bool (*fp_UpdateS3fsCredential)(char** ppaccess_key_id, char** ppserect_access_key, char** ppaccess_token, long long* ptoken_expire, char** pperrstr);
typedef bool (*fp_UpdateS3fsCredential)(char** ppaccess_key_id, char** ppsecret_access_key, char** ppaccess_token, long long* ptoken_expire, char** pperrstr);
} // extern "C"
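These prototypes define the contract for an external credential library loaded with the credlib option: every char** argument receives a newly allocated string, the allocated areas are freed by the caller, and ptoken_expire receives the token expiration time. As a rough sketch only (the values are dummies, and a complete library would also implement the other entry points declared above), an UpdateS3fsCredential implementation could look like:

#include <cstdlib>
#include <cstring>
#include <ctime>

extern "C" bool UpdateS3fsCredential(char** ppaccess_key_id, char** ppsecret_access_key, char** ppaccess_token, long long* ptoken_expire, char** pperrstr)
{
    if(!ppaccess_key_id || !ppsecret_access_key || !ppaccess_token || !ptoken_expire){
        if(pperrstr){
            *pperrstr = strdup("Invalid parameters.");   // allocated areas are freed by the caller
        }
        return false;
    }

    // Allocate every returned string; the caller (s3fs) frees them.
    *ppaccess_key_id     = strdup("DUMMYACCESSKEYID");
    *ppsecret_access_key = strdup("DUMMYSECRETACCESSKEY");
    *ppaccess_token      = strdup("DUMMYSESSIONTOKEN");
    *ptoken_expire       = static_cast<long long>(time(nullptr)) + 3600;   // valid for one hour

    if(pperrstr){
        *pperrstr = nullptr;                             // no error message
    }
    return *ppaccess_key_id && *ppsecret_access_key && *ppaccess_token;
}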

View File

@ -20,6 +20,8 @@
#include <string>
#include "common.h"
//-------------------------------------------------------------------
// Global variables
//-------------------------------------------------------------------
@ -31,10 +33,21 @@ bool noxmlns = false;
std::string program_name;
std::string service_path = "/";
std::string s3host = "https://s3.amazonaws.com";
std::string endpoint = "us-east-1";
std::string region = "us-east-1";
std::string cipher_suites;
std::string instance_name;
std::atomic<long long unsigned> num_requests_head_object;
std::atomic<long long unsigned> num_requests_put_object;
std::atomic<long long unsigned> num_requests_get_object;
std::atomic<long long unsigned> num_requests_delete_object;
std::atomic<long long unsigned> num_requests_list_bucket;
std::atomic<long long unsigned> num_requests_mpu_initiate;
std::atomic<long long unsigned> num_requests_mpu_complete;
std::atomic<long long unsigned> num_requests_mpu_abort;
std::atomic<long long unsigned> num_requests_mpu_upload_part;
std::atomic<long long unsigned> num_requests_mpu_copy_part;
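The new atomic globals above are the S3 operation performance counters mentioned in the commit log, one per request type. The real increment sites live in the request code, which is not shown here; the standalone sketch below only illustrates the intended usage pattern with a hypothetical increment/report pair:

#include <atomic>
#include <cstdio>

std::atomic<long long unsigned> num_requests_head_object{0};

// Hypothetical call sites: bump the counter wherever a HeadObject request is
// issued, and read it when dumping statistics. fetch_add with relaxed ordering
// is enough because each counter is independent of any other shared state.
void count_head_object()
{
    num_requests_head_object.fetch_add(1, std::memory_order_relaxed);
}

void dump_counters()
{
    printf("HeadObject requests: %llu\n", num_requests_head_object.load());
}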
/*
* Local variables:
* tab-width: 4

View File

@ -194,21 +194,28 @@ static constexpr char help_string[] =
"\n"
" stat_cache_expire (default is 900))\n"
" - specify expire time (seconds) for entries in the stat cache.\n"
" This expire time indicates the time since stat cached. and this\n"
" is also set to the expire time of the symbolic link cache.\n"
" This expire time indicates the time since stat cached.\n"
"\n"
" stat_cache_interval_expire (default is 900)\n"
" - specify expire time (seconds) for entries in the stat cache(and\n"
" symbolic link cache).\n"
" - specify expire time (seconds) for entries in the stat cache.\n"
" This expire time is based on the time from the last access time\n"
" of the stat cache. This option is exclusive with stat_cache_expire,\n"
" and is left for compatibility with older versions.\n"
"\n"
" disable_noobj_cache (default is enable)\n"
" - By default s3fs memorizes when an object does not exist up until\n"
" the stat cache timeout. This caching can cause staleness for\n"
" applications. If disabled, s3fs will not memorize objects and may\n"
" cause extra HeadObject requests and reduce performance.\n"
" enable_negative_cache (default is enabled negative cache)\n"
" - This option will keep non-existence of objects in a stat cache.\n"
" When this negative cache is enabled, it will not process extra\n"
" HeadObject requests to search for non-existent objects, improving\n"
" performance.\n"
" This feature is enabled by default, so there is no need to specify\n"
" it.\n"
"\n"
" disable_negative_cache (default is enabled negative cache)\n"
" - By default, s3fs keeps non-existent objects in the stat cache.\n"
" This option disables this negative caching.\n"
" This prevents delays in updates due to cache retention.\n"
" However, it may increase the number of HeadObject requests to check\n"
" if an object exists, which may decrease performance.\n"
"\n"
" no_check_certificate\n"
" - server certificate won't be checked against the available \n"
@ -217,6 +224,21 @@ static constexpr char help_string[] =
" ssl_verify_hostname (default=\"2\")\n"
" - When 0, do not verify the SSL certificate against the hostname.\n"
"\n"
" ssl_client_cert (default=\"\")\n"
" - Specify an SSL client certificate.\n"
" Specify this optional parameter in the following format:\n"
" \"<SSL Cert>[:<Cert Type>[:<Private Key>[:<Key Type>\n"
" [:<Password>]]]]\"\n"
" <SSL Cert>: Client certificate.\n"
" Specify the file path or NickName(for NSS, etc.).\n"
" <Cert Type>: Type of certificate, default is \"PEM\"(optional).\n"
" <Private Key>: Certificate's private key file(optional).\n"
" <Key Type>: Type of private key, default is \"PEM\"(optional).\n"
" <Password>: Passphrase of the private key(optional).\n"
" It is also possible to omit this value and specify\n"
" it using the environment variable\n"
" \"S3FS_SSL_PRIVKEY_PASSWORD\".\n"
"\n"
" nodnscache (disable DNS cache)\n"
" - s3fs is always using DNS cache, this option make DNS cache disable.\n"
"\n"
@ -224,17 +246,6 @@ static constexpr char help_string[] =
" - s3fs is always using SSL session cache, this option make SSL \n"
" session cache disable.\n"
"\n"
" multireq_max (default=\"20\")\n"
" - maximum number of parallel request for listing objects.\n"
"\n"
" parallel_count (default=\"5\")\n"
" - number of parallel request for uploading big objects.\n"
" s3fs uploads large object (over 20MB) by multipart post request, \n"
" and sends parallel requests.\n"
" This option limits parallel request count which s3fs requests \n"
" at once. It is necessary to set this value depending on a CPU \n"
" and a network band.\n"
"\n"
" multipart_size (default=\"10\")\n"
" - part size, in MB, for each multipart request.\n"
" The minimum value is 5 MB and the maximum value is 5 GB.\n"
@ -301,8 +312,8 @@ static constexpr char help_string[] =
" If you do not use https, please specify the URL with the url\n"
" option.\n"
"\n"
" endpoint (default=\"us-east-1\")\n"
" - sets the endpoint to use on signature version 4\n"
" region (default=\"us-east-1\")\n"
" - sets the region to use on signature version 4\n"
" If this option is not specified, s3fs uses \"us-east-1\" region as\n"
" the default. If the s3fs could not connect to the region specified\n"
" by this option, s3fs could not run. But if you do not specify this\n"
@ -310,6 +321,7 @@ static constexpr char help_string[] =
" will retry to automatically connect to the other region. So s3fs\n"
" can know the correct region name, because s3fs can find it in an\n"
" error from the S3 server.\n"
" You can also specify the legacy -o endpoint which means the same thing.\n"
"\n"
" sigv2 (default is signature version 4 falling back to version 2)\n"
" - sets signing AWS requests by using only signature version 2\n"
@ -342,11 +354,12 @@ static constexpr char help_string[] =
" Note that this option is still experimental and may change in the\n"
" future.\n"
"\n"
" max_thread_count (default is \"5\")\n"
" - Specifies the number of threads waiting for stream uploads.\n"
" Note that this option and Streamm Upload are still experimental\n"
" and subject to change in the future.\n"
" This option will be merged with \"parallel_count\" in the future.\n"
" max_thread_count (default is \"10\")\n"
" - This value is the maximum number of parallel requests to be\n"
" sent, and the number of parallel processes for head requests,\n"
" multipart uploads and stream uploads.\n"
" Worker threads will be started to process requests according to\n"
" this value.\n"
"\n"
" enable_content_md5 (default is disable)\n"
" - Allow S3 server to check data integrity of uploads via the\n"
@ -527,6 +540,14 @@ static constexpr char help_string[] =
" Separate the username and passphrase with a ':' character and\n"
" specify each as a URL-encoded string.\n"
"\n"
" ipresolve (default=\"whatever\")\n"
" Select what type of IP addresses to use when establishing a\n"
" connection.\n"
" Default('whatever') can use addresses of all IP versions(IPv4 and\n"
" IPv6) that your system allows. If you specify 'IPv4', only IPv4\n"
" addresses are used. And when 'IPv6'is specified, only IPv6 addresses\n"
" will be used.\n"
"\n"
" logfile - specify the log output file.\n"
" s3fs outputs the log file to syslog. Alternatively, if s3fs is\n"
" started with the \"-f\" option specified, the log will be output\n"
@ -630,7 +651,7 @@ void show_help()
void show_version()
{
printf(
"Amazon Simple Storage Service File System V%s (commit:%s) with %s\n"
"Amazon Simple Storage Service File System V%s%s with %s\n"
"Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>\n"
"License GPL2: GNU GPL version 2 <https://gnu.org/licenses/gpl.html>\n"
"This is free software: you are free to change and redistribute it.\n"
@ -640,7 +661,7 @@ void show_version()
const char* short_version()
{
static constexpr char short_ver[] = "s3fs version " VERSION "(" COMMIT_HASH_VAL ")";
static constexpr char short_ver[] = "s3fs version " VERSION "" COMMIT_HASH_VAL;
return short_ver;
}
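Taken together, the options documented in the help text above are passed with -o at mount time; for example (the bucket name and mountpoint are placeholders chosen only for illustration):

    s3fs mybucket /mnt/s3 -o region=us-west-2 -o stat_cache_expire=600 -o disable_negative_cache -o max_thread_count=10 -o ipresolve=IPv4

This would sign requests for us-west-2, shorten stat cache expiry to 600 seconds, turn off negative caching, cap the worker threads at 10, and restrict connections to IPv4 addresses.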

View File

@ -23,6 +23,7 @@
#include <memory>
#include <sstream>
#include <string>
#include <strings.h>
#include "common.h"
#include "s3fs_logger.h"
@ -34,7 +35,7 @@ constexpr char S3fsLog::LOGFILEENV[];
constexpr const char* S3fsLog::nest_spaces[];
constexpr char S3fsLog::MSGTIMESTAMP[];
S3fsLog* S3fsLog::pSingleton = nullptr;
S3fsLog::s3fs_log_level S3fsLog::debug_level = S3fsLog::LEVEL_CRIT;
S3fsLog::Level S3fsLog::debug_level = S3fsLog::Level::CRIT;
FILE* S3fsLog::logfp = nullptr;
std::string S3fsLog::logfile;
bool S3fsLog::time_stamp = true;
@ -42,9 +43,9 @@ bool S3fsLog::time_stamp = true;
//-------------------------------------------------------------------
// S3fsLog class : class methods
//-------------------------------------------------------------------
bool S3fsLog::IsS3fsLogLevel(s3fs_log_level level)
bool S3fsLog::IsS3fsLogLevel(S3fsLog::Level level)
{
return (level == (S3fsLog::debug_level & level));
return static_cast<int>(level) == (static_cast<int>(S3fsLog::debug_level) & static_cast<int>(level));
}
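The return statement above relies on the level values forming a cumulative bitmask (CRIT=0, ERR=1, WARN=3, INFO=7, DBG=15): a message is emitted when every bit of its level is contained in the configured debug level. A standalone sketch of the same check, re-declaring the enum locally purely for illustration:

#include <cassert>
#include <cstdint>

enum class Level : uint8_t { CRIT = 0, ERR = 1, WARN = 3, INFO = 7, DBG = 15 };

// Same test as S3fsLog::IsS3fsLogLevel above.
static bool is_enabled(Level debug_level, Level msg_level)
{
    return static_cast<int>(msg_level) == (static_cast<int>(debug_level) & static_cast<int>(msg_level));
}

int main()
{
    assert( is_enabled(Level::INFO, Level::WARN));   // 7 & 3 == 3  -> WARN shown at INFO
    assert(!is_enabled(Level::ERR,  Level::INFO));   // 1 & 7 == 1  -> INFO hidden at ERR
    assert( is_enabled(Level::ERR,  Level::CRIT));   // CRIT (0) is always shown
    return 0;
}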
std::string S3fsLog::GetCurrentTime()
@ -94,7 +95,7 @@ bool S3fsLog::ReopenLogfile()
return S3fsLog::pSingleton->LowSetLogfile(tmp.c_str());
}
S3fsLog::s3fs_log_level S3fsLog::SetLogLevel(s3fs_log_level level)
S3fsLog::Level S3fsLog::SetLogLevel(S3fsLog::Level level)
{
if(!S3fsLog::pSingleton){
S3FS_PRN_CRIT("S3fsLog::pSingleton is nullptr.");
@ -103,7 +104,7 @@ S3fsLog::s3fs_log_level S3fsLog::SetLogLevel(s3fs_log_level level)
return S3fsLog::pSingleton->LowSetLogLevel(level);
}
S3fsLog::s3fs_log_level S3fsLog::BumpupLogLevel()
S3fsLog::Level S3fsLog::BumpupLogLevel()
{
if(!S3fsLog::pSingleton){
S3FS_PRN_CRIT("S3fsLog::pSingleton is nullptr.");
@ -145,7 +146,7 @@ S3fsLog::~S3fsLog()
}
S3fsLog::logfile.clear();
S3fsLog::pSingleton = nullptr;
S3fsLog::debug_level = S3fsLog::LEVEL_CRIT;
S3fsLog::debug_level = Level::CRIT;
closelog();
}else{
@ -217,7 +218,7 @@ bool S3fsLog::LowSetLogfile(const char* pfile)
return true;
}
S3fsLog::s3fs_log_level S3fsLog::LowSetLogLevel(s3fs_log_level level)
S3fsLog::Level S3fsLog::LowSetLogLevel(Level level)
{
if(S3fsLog::pSingleton != this){
S3FS_PRN_ERR("This object is not as same as S3fsLog::pSingleton.");
@ -226,30 +227,30 @@ S3fsLog::s3fs_log_level S3fsLog::LowSetLogLevel(s3fs_log_level level)
if(level == S3fsLog::debug_level){
return S3fsLog::debug_level;
}
s3fs_log_level old = S3fsLog::debug_level;
Level old = S3fsLog::debug_level;
S3fsLog::debug_level = level;
setlogmask(LOG_UPTO(GetSyslogLevel(S3fsLog::debug_level)));
S3FS_PRN_CRIT("change debug level from %sto %s", GetLevelString(old), GetLevelString(S3fsLog::debug_level));
return old;
}
S3fsLog::s3fs_log_level S3fsLog::LowBumpupLogLevel()
S3fsLog::Level S3fsLog::LowBumpupLogLevel() const
{
if(S3fsLog::pSingleton != this){
S3FS_PRN_ERR("This object is not as same as S3fsLog::pSingleton.");
return S3fsLog::debug_level; // Although it is an error, it returns the current value.
}
s3fs_log_level old = S3fsLog::debug_level;
S3fsLog::debug_level = ( LEVEL_CRIT == S3fsLog::debug_level ? LEVEL_ERR :
LEVEL_ERR == S3fsLog::debug_level ? LEVEL_WARN :
LEVEL_WARN == S3fsLog::debug_level ? LEVEL_INFO :
LEVEL_INFO == S3fsLog::debug_level ? LEVEL_DBG : LEVEL_CRIT );
Level old = S3fsLog::debug_level;
S3fsLog::debug_level = ( Level::CRIT == S3fsLog::debug_level ? Level::ERR :
Level::ERR == S3fsLog::debug_level ? Level::WARN :
Level::WARN == S3fsLog::debug_level ? Level::INFO :
Level::INFO == S3fsLog::debug_level ? Level::DBG : Level::CRIT );
setlogmask(LOG_UPTO(GetSyslogLevel(S3fsLog::debug_level)));
S3FS_PRN_CRIT("change debug level from %sto %s", GetLevelString(old), GetLevelString(S3fsLog::debug_level));
return old;
}
void s3fs_low_logprn(S3fsLog::s3fs_log_level level, const char* file, const char *func, int line, const char *fmt, ...)
void s3fs_low_logprn(S3fsLog::Level level, const char* file, const char *func, int line, const char *fmt, ...)
{
if(S3fsLog::IsS3fsLogLevel(level)){
va_list va;
@ -257,7 +258,7 @@ void s3fs_low_logprn(S3fsLog::s3fs_log_level level, const char* file, const char
size_t len = vsnprintf(nullptr, 0, fmt, va) + 1;
va_end(va);
std::unique_ptr<char[]> message(new char[len]);
auto message = std::make_unique<char[]>(len);
va_start(va, fmt);
vsnprintf(message.get(), len, fmt, va);
va_end(va);
@ -273,7 +274,7 @@ void s3fs_low_logprn(S3fsLog::s3fs_log_level level, const char* file, const char
}
}
void s3fs_low_logprn2(S3fsLog::s3fs_log_level level, int nest, const char* file, const char *func, int line, const char *fmt, ...)
void s3fs_low_logprn2(S3fsLog::Level level, int nest, const char* file, const char *func, int line, const char *fmt, ...)
{
if(S3fsLog::IsS3fsLogLevel(level)){
va_list va;
@ -281,7 +282,7 @@ void s3fs_low_logprn2(S3fsLog::s3fs_log_level level, int nest, const char* file,
size_t len = vsnprintf(nullptr, 0, fmt, va) + 1;
va_end(va);
std::unique_ptr<char[]> message(new char[len]);
auto message = std::make_unique<char[]>(len);
va_start(va, fmt);
vsnprintf(message.get(), len, fmt, va);
va_end(va);

View File

@ -22,6 +22,7 @@
#define S3FS_LOGGER_H_
#include <cstdarg>
#include <cstdint>
#include <cstdio>
#include <string>
#include <syslog.h>
@ -42,12 +43,12 @@
class S3fsLog
{
public:
enum s3fs_log_level{
LEVEL_CRIT = 0, // LEVEL_CRIT
LEVEL_ERR = 1, // LEVEL_ERR
LEVEL_WARN = 3, // LEVEL_WARNING
LEVEL_INFO = 7, // LEVEL_INFO
LEVEL_DBG = 15 // LEVEL_DEBUG
enum class Level : uint8_t {
CRIT = 0, // LEVEL_CRIT
ERR = 1, // LEVEL_ERR
WARN = 3, // LEVEL_WARNING
INFO = 7, // LEVEL_INFO
DBG = 15 // LEVEL_DEBUG
};
protected:
@ -57,7 +58,7 @@ class S3fsLog
static constexpr char MSGTIMESTAMP[] = "S3FS_MSGTIMESTAMP";
static S3fsLog* pSingleton;
static s3fs_log_level debug_level;
static Level debug_level;
static FILE* logfp;
static std::string logfile;
static bool time_stamp;
@ -65,33 +66,35 @@ class S3fsLog
protected:
bool LowLoadEnv();
bool LowSetLogfile(const char* pfile);
s3fs_log_level LowSetLogLevel(s3fs_log_level level);
s3fs_log_level LowBumpupLogLevel();
Level LowSetLogLevel(Level level);
Level LowBumpupLogLevel() const;
public:
static bool IsS3fsLogLevel(s3fs_log_level level);
static bool IsS3fsLogCrit() { return IsS3fsLogLevel(LEVEL_CRIT); }
static bool IsS3fsLogErr() { return IsS3fsLogLevel(LEVEL_ERR); }
static bool IsS3fsLogWarn() { return IsS3fsLogLevel(LEVEL_WARN); }
static bool IsS3fsLogInfo() { return IsS3fsLogLevel(LEVEL_INFO); }
static bool IsS3fsLogDbg() { return IsS3fsLogLevel(LEVEL_DBG); }
static bool IsS3fsLogLevel(Level level);
static bool IsS3fsLogCrit() { return IsS3fsLogLevel(Level::CRIT); }
static bool IsS3fsLogErr() { return IsS3fsLogLevel(Level::ERR); }
static bool IsS3fsLogWarn() { return IsS3fsLogLevel(Level::WARN); }
static bool IsS3fsLogInfo() { return IsS3fsLogLevel(Level::INFO); }
static bool IsS3fsLogDbg() { return IsS3fsLogLevel(Level::DBG); }
static constexpr int GetSyslogLevel(s3fs_log_level level)
static constexpr int GetSyslogLevel(Level level)
{
return ( LEVEL_DBG == (level & LEVEL_DBG) ? LOG_DEBUG :
LEVEL_INFO == (level & LEVEL_DBG) ? LOG_INFO :
LEVEL_WARN == (level & LEVEL_DBG) ? LOG_WARNING :
LEVEL_ERR == (level & LEVEL_DBG) ? LOG_ERR : LOG_CRIT );
int masked = static_cast<int>(level) & static_cast<int>(Level::DBG);
return ( static_cast<int>(Level::DBG) == masked ? LOG_DEBUG :
static_cast<int>(Level::INFO) == masked ? LOG_INFO :
static_cast<int>(Level::WARN) == masked ? LOG_WARNING :
static_cast<int>(Level::ERR) == masked ? LOG_ERR : LOG_CRIT );
}
static std::string GetCurrentTime();
static constexpr const char* GetLevelString(s3fs_log_level level)
static constexpr const char* GetLevelString(Level level)
{
return ( LEVEL_DBG == (level & LEVEL_DBG) ? "[DBG] " :
LEVEL_INFO == (level & LEVEL_DBG) ? "[INF] " :
LEVEL_WARN == (level & LEVEL_DBG) ? "[WAN] " :
LEVEL_ERR == (level & LEVEL_DBG) ? "[ERR] " : "[CRT] " );
int masked = static_cast<int>(level) & static_cast<int>(Level::DBG);
return ( static_cast<int>(Level::DBG) == masked ? "[DBG] " :
static_cast<int>(Level::INFO) == masked ? "[INF] " :
static_cast<int>(Level::WARN) == masked ? "[WAN] " :
static_cast<int>(Level::ERR) == masked ? "[ERR] " : "[CRT] " );
}
static constexpr const char* GetS3fsLogNest(int nest)
@ -130,8 +133,8 @@ class S3fsLog
static bool SetLogfile(const char* pfile);
static bool ReopenLogfile();
static s3fs_log_level SetLogLevel(s3fs_log_level level);
static s3fs_log_level BumpupLogLevel();
static Level SetLogLevel(Level level);
static Level BumpupLogLevel();
static bool SetTimeStamp(bool value);
explicit S3fsLog();
@ -145,13 +148,13 @@ class S3fsLog
//-------------------------------------------------------------------
// Debug macros
//-------------------------------------------------------------------
void s3fs_low_logprn(S3fsLog::s3fs_log_level level, const char* file, const char *func, int line, const char *fmt, ...) __attribute__ ((format (printf, 5, 6)));
void s3fs_low_logprn(S3fsLog::Level level, const char* file, const char *func, int line, const char *fmt, ...) __attribute__ ((format (printf, 5, 6)));
#define S3FS_LOW_LOGPRN(level, fmt, ...) \
do{ \
s3fs_low_logprn(level, __FILE__, __func__, __LINE__, fmt, ##__VA_ARGS__); \
}while(0)
void s3fs_low_logprn2(S3fsLog::s3fs_log_level level, int nest, const char* file, const char *func, int line, const char *fmt, ...) __attribute__ ((format (printf, 6, 7)));
void s3fs_low_logprn2(S3fsLog::Level level, int nest, const char* file, const char *func, int line, const char *fmt, ...) __attribute__ ((format (printf, 6, 7)));
#define S3FS_LOW_LOGPRN2(level, nest, fmt, ...) \
do{ \
s3fs_low_logprn2(level, nest, __FILE__, __func__, __LINE__, fmt, ##__VA_ARGS__); \
@ -164,7 +167,7 @@ void s3fs_low_logprn2(S3fsLog::s3fs_log_level level, int nest, const char* file,
fprintf(S3fsLog::GetOutputLogFile(), "%s[CURL DBG] " fmt "%s\n", S3fsLog::GetCurrentTime().c_str(), __VA_ARGS__); \
S3fsLog::Flush(); \
}else{ \
syslog(S3fsLog::GetSyslogLevel(S3fsLog::LEVEL_CRIT), "%s" fmt "%s", instance_name.c_str(), __VA_ARGS__); \
syslog(S3fsLog::GetSyslogLevel(S3fsLog::Level::CRIT), "%s" fmt "%s", instance_name.c_str(), __VA_ARGS__); \
} \
}while(0)
@ -176,7 +179,7 @@ void s3fs_low_logprn2(S3fsLog::s3fs_log_level level, int nest, const char* file,
S3fsLog::Flush(); \
}else{ \
fprintf(S3fsLog::GetErrorLogFile(), "s3fs: " fmt "%s\n", __VA_ARGS__); \
syslog(S3fsLog::GetSyslogLevel(S3fsLog::LEVEL_CRIT), "%ss3fs: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \
syslog(S3fsLog::GetSyslogLevel(S3fsLog::Level::CRIT), "%ss3fs: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \
} \
}while(0)
@ -185,10 +188,10 @@ void s3fs_low_logprn2(S3fsLog::s3fs_log_level level, int nest, const char* file,
do{ \
if(foreground || S3fsLog::IsSetLogFile()){ \
S3fsLog::SeekEnd(); \
fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s%s:%s(%d): " fmt "%s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(S3fsLog::LEVEL_INFO), S3fsLog::GetS3fsLogNest(0), __FILE__, __func__, __LINE__, __VA_ARGS__, ""); \
fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s%s:%s(%d): " fmt "%s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(S3fsLog::Level::INFO), S3fsLog::GetS3fsLogNest(0), __FILE__, __func__, __LINE__, __VA_ARGS__, ""); \
S3fsLog::Flush(); \
}else{ \
syslog(S3fsLog::GetSyslogLevel(S3fsLog::LEVEL_INFO), "%s%s" fmt "%s", instance_name.c_str(), S3fsLog::GetS3fsLogNest(0), __VA_ARGS__, ""); \
syslog(S3fsLog::GetSyslogLevel(S3fsLog::Level::INFO), "%s%s" fmt "%s", instance_name.c_str(), S3fsLog::GetS3fsLogNest(0), __VA_ARGS__, ""); \
} \
}while(0)
@ -196,10 +199,10 @@ void s3fs_low_logprn2(S3fsLog::s3fs_log_level level, int nest, const char* file,
do{ \
if(foreground || S3fsLog::IsSetLogFile()){ \
S3fsLog::SeekEnd(); \
fprintf(S3fsLog::GetOutputLogFile(), "%s%s" fmt "%s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(S3fsLog::LEVEL_INFO), __VA_ARGS__, ""); \
fprintf(S3fsLog::GetOutputLogFile(), "%s%s" fmt "%s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(S3fsLog::Level::INFO), __VA_ARGS__, ""); \
S3fsLog::Flush(); \
}else{ \
syslog(S3fsLog::GetSyslogLevel(S3fsLog::LEVEL_INFO), "%s" fmt "%s", instance_name.c_str(), __VA_ARGS__, ""); \
syslog(S3fsLog::GetSyslogLevel(S3fsLog::Level::INFO), "%s" fmt "%s", instance_name.c_str(), __VA_ARGS__, ""); \
} \
}while(0)
@ -211,7 +214,7 @@ void s3fs_low_logprn2(S3fsLog::s3fs_log_level level, int nest, const char* file,
fprintf(fp, fmt "%s\n", __VA_ARGS__); \
S3fsLog::Flush(); \
}else{ \
syslog(S3fsLog::GetSyslogLevel(S3fsLog::LEVEL_INFO), "%s: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \
syslog(S3fsLog::GetSyslogLevel(S3fsLog::Level::INFO), "%s: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \
} \
}while(0)
@ -219,14 +222,14 @@ void s3fs_low_logprn2(S3fsLog::s3fs_log_level level, int nest, const char* file,
// small trick for VA_ARGS
//
#define S3FS_PRN_EXIT(fmt, ...) S3FS_LOW_LOGPRN_EXIT(fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CRIT(fmt, ...) S3FS_LOW_LOGPRN(S3fsLog::LEVEL_CRIT, fmt, ##__VA_ARGS__)
#define S3FS_PRN_ERR(fmt, ...) S3FS_LOW_LOGPRN(S3fsLog::LEVEL_ERR, fmt, ##__VA_ARGS__)
#define S3FS_PRN_WARN(fmt, ...) S3FS_LOW_LOGPRN(S3fsLog::LEVEL_WARN, fmt, ##__VA_ARGS__)
#define S3FS_PRN_DBG(fmt, ...) S3FS_LOW_LOGPRN(S3fsLog::LEVEL_DBG, fmt, ##__VA_ARGS__)
#define S3FS_PRN_INFO(fmt, ...) S3FS_LOW_LOGPRN2(S3fsLog::LEVEL_INFO, 0, fmt, ##__VA_ARGS__)
#define S3FS_PRN_INFO1(fmt, ...) S3FS_LOW_LOGPRN2(S3fsLog::LEVEL_INFO, 1, fmt, ##__VA_ARGS__)
#define S3FS_PRN_INFO2(fmt, ...) S3FS_LOW_LOGPRN2(S3fsLog::LEVEL_INFO, 2, fmt, ##__VA_ARGS__)
#define S3FS_PRN_INFO3(fmt, ...) S3FS_LOW_LOGPRN2(S3fsLog::LEVEL_INFO, 3, fmt, ##__VA_ARGS__)
#define S3FS_PRN_CRIT(fmt, ...) S3FS_LOW_LOGPRN(S3fsLog::Level::CRIT, fmt, ##__VA_ARGS__)
#define S3FS_PRN_ERR(fmt, ...) S3FS_LOW_LOGPRN(S3fsLog::Level::ERR, fmt, ##__VA_ARGS__)
#define S3FS_PRN_WARN(fmt, ...) S3FS_LOW_LOGPRN(S3fsLog::Level::WARN, fmt, ##__VA_ARGS__)
#define S3FS_PRN_DBG(fmt, ...) S3FS_LOW_LOGPRN(S3fsLog::Level::DBG, fmt, ##__VA_ARGS__)
#define S3FS_PRN_INFO(fmt, ...) S3FS_LOW_LOGPRN2(S3fsLog::Level::INFO, 0, fmt, ##__VA_ARGS__)
#define S3FS_PRN_INFO1(fmt, ...) S3FS_LOW_LOGPRN2(S3fsLog::Level::INFO, 1, fmt, ##__VA_ARGS__)
#define S3FS_PRN_INFO2(fmt, ...) S3FS_LOW_LOGPRN2(S3fsLog::Level::INFO, 2, fmt, ##__VA_ARGS__)
#define S3FS_PRN_INFO3(fmt, ...) S3FS_LOW_LOGPRN2(S3fsLog::Level::INFO, 3, fmt, ##__VA_ARGS__)
#define S3FS_PRN_CURL(fmt, ...) S3FS_LOW_CURLDBG(fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CACHE(fp, ...) S3FS_LOW_CACHE(fp, ##__VA_ARGS__, "")
@ -247,15 +250,15 @@ void s3fs_low_logprn2(S3fsLog::s3fs_log_level level, int nest, const char* file,
} while (0)
#define FUSE_CTX_INFO(fmt, ...) do { \
PRINT_FUSE_CTX(S3fsLog::LEVEL_INFO, 0, fmt, ##__VA_ARGS__); \
PRINT_FUSE_CTX(S3fsLog::Level::INFO, 0, fmt, ##__VA_ARGS__); \
} while (0)
#define FUSE_CTX_INFO1(fmt, ...) do { \
PRINT_FUSE_CTX(S3fsLog::LEVEL_INFO, 1, fmt, ##__VA_ARGS__); \
PRINT_FUSE_CTX(S3fsLog::Level::INFO, 1, fmt, ##__VA_ARGS__); \
} while (0)
#define FUSE_CTX_DBG(fmt, ...) do { \
PRINT_FUSE_CTX(S3fsLog::LEVEL_DBG, 0, fmt, ##__VA_ARGS__); \
PRINT_FUSE_CTX(S3fsLog::Level::DBG, 0, fmt, ##__VA_ARGS__); \
} while (0)
#endif // S3FS_LOGGER_H_
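Because GetSyslogLevel (and GetLevelString) above are constexpr and work by masking against Level::DBG, the level-to-syslog mapping can be verified at compile time. The following standalone sketch re-declares the mapping locally, rather than including the s3fs header, purely to demonstrate that:

#include <cstdint>
#include <syslog.h>

enum class Level : uint8_t { CRIT = 0, ERR = 1, WARN = 3, INFO = 7, DBG = 15 };

// Mirrors S3fsLog::GetSyslogLevel above: mask against DBG, then map to syslog.
constexpr int to_syslog_level(Level level)
{
    int masked = static_cast<int>(level) & static_cast<int>(Level::DBG);
    return ( static_cast<int>(Level::DBG)  == masked ? LOG_DEBUG   :
             static_cast<int>(Level::INFO) == masked ? LOG_INFO    :
             static_cast<int>(Level::WARN) == masked ? LOG_WARNING :
             static_cast<int>(Level::ERR)  == masked ? LOG_ERR     : LOG_CRIT );
}

static_assert(to_syslog_level(Level::CRIT) == LOG_CRIT,    "CRIT maps to LOG_CRIT");
static_assert(to_syslog_level(Level::WARN) == LOG_WARNING, "WARN maps to LOG_WARNING");
static_assert(to_syslog_level(Level::DBG)  == LOG_DEBUG,   "DBG maps to LOG_DEBUG");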

src/s3fs_threadreqs.cpp (new file, 1517 lines)

File diff suppressed because it is too large.

src/s3fs_threadreqs.h (new file, 267 lines)
View File

@ -0,0 +1,267 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_THREADREQS_H_
#define S3FS_THREADREQS_H_
#include <string>
#include "common.h"
#include "metaheader.h"
#include "curl.h"
#include "s3objlist.h"
#include "syncfiller.h"
#include "psemaphore.h"
//-------------------------------------------------------------------
// Structures for MultiThread Request
//-------------------------------------------------------------------
typedef std::map<std::string, int> retrycnt_t;
//
// Head Request parameter structure for Thread Pool.
//
struct head_req_thparam
{
std::string path;
headers_t* pmeta = nullptr;
int result = 0;
};
//
// Multi Head Request parameter structure for Thread Pool.
//
struct multi_head_req_thparam
{
std::string path;
SyncFiller* psyncfiller = nullptr;
std::mutex* pthparam_lock = nullptr;
int* pretrycount = nullptr;
s3obj_list_t* pnotfound_list = nullptr;
bool use_wtf8 = false;
objtype_t objtype = objtype_t::UNKNOWN;
int* presult = nullptr;
};
//
// Delete Request parameter structure for Thread Pool.
//
struct delete_req_thparam
{
std::string path;
int result = 0;
};
//
// Put Head Request parameter structure for Thread Pool.
//
struct put_head_req_thparam
{
std::string path;
headers_t meta;
bool isCopy = false;
int result = 0;
};
//
// Put Request parameter structure for Thread Pool.
//
struct put_req_thparam
{
std::string path;
headers_t meta;
int fd = -1;
bool ahbe = false;
int result = 0;
};
//
// List Bucket Request parameter structure for Thread Pool.
//
struct list_bucket_req_thparam
{
std::string path;
std::string query;
std::string* presponseBody = nullptr;
int result = 0;
};
//
// Check Service Request parameter structure for Thread Pool.
//
struct check_service_req_thparam
{
std::string path;
bool forceNoSSE = false;
bool support_compat_dir = false;
long* presponseCode = nullptr;
std::string* presponseBody = nullptr;
int result = 0;
};
//
// Pre Multipart Upload Request parameter structure for Thread Pool.
//
struct pre_multipart_upload_req_thparam
{
std::string path;
headers_t meta;
std::string upload_id;
int result = 0;
};
//
// Multipart Upload Part Request parameter structure for Thread Pool.
//
struct multipart_upload_part_req_thparam
{
std::string path;
std::string upload_id;
int upload_fd = -1;
off_t start = 0;
off_t size = 0;
bool is_copy = false;
int part_num = -1;
std::mutex* pthparam_lock = nullptr;
etagpair* petag = nullptr;
int* presult = nullptr;
};
//
// Complete Multipart Upload Request parameter structure for Thread Pool.
//
struct complete_multipart_upload_req_thparam
{
std::string path;
std::string upload_id;
etaglist_t etaglist;
int result = 0;
};
//
// Abort Multipart Upload Request parameter structure for Thread Pool.
//
struct abort_multipart_upload_req_thparam
{
std::string path;
std::string upload_id;
int result = 0;
};
//
// Multipart Put Head Request parameter structure for Thread Pool.
//
struct multipart_put_head_req_thparam
{
std::string from;
std::string to;
std::string upload_id;
int part_number = 0;
headers_t meta;
std::mutex* pthparam_lock = nullptr;
filepart* ppartdata = nullptr;
int* pretrycount = nullptr;
int* presult = nullptr;
};
//
// Parallel Get Object Request parameter structure for Thread Pool.
//
struct parallel_get_object_req_thparam
{
std::string path;
int fd = -1;
off_t start = 0;
off_t size = 0;
sse_type_t ssetype = sse_type_t::SSE_DISABLE;
std::string ssevalue;
std::mutex* pthparam_lock = nullptr;
int* pretrycount = nullptr;
int* presult = nullptr;
};
//
// Get Object Request parameter structure for Thread Pool.
//
struct get_object_req_thparam
{
std::string path;
int fd = -1;
off_t start = 0;
off_t size = 0;
int result = 0;
};
//-------------------------------------------------------------------
// Thread Worker functions for MultiThread Request
//-------------------------------------------------------------------
void* head_req_threadworker(S3fsCurl& s3fscurl, void* arg);
void* multi_head_req_threadworker(S3fsCurl& s3fscurl, void* arg);
void* delete_req_threadworker(S3fsCurl& s3fscurl, void* arg);
void* put_head_req_threadworker(S3fsCurl& s3fscurl, void* arg);
void* put_req_threadworker(S3fsCurl& s3fscurl, void* arg);
void* list_bucket_req_threadworker(S3fsCurl& s3fscurl, void* arg);
void* check_service_req_threadworker(S3fsCurl& s3fscurl, void* arg);
void* pre_multipart_upload_req_threadworker(S3fsCurl& s3fscurl, void* arg);
void* multipart_upload_part_req_threadworker(S3fsCurl& s3fscurl, void* arg);
void* complete_multipart_upload_threadworker(S3fsCurl& s3fscurl, void* arg);
void* abort_multipart_upload_req_threadworker(S3fsCurl& s3fscurl, void* arg);
void* multipart_put_head_req_threadworker(S3fsCurl& s3fscurl, void* arg);
void* parallel_get_object_req_threadworker(S3fsCurl& s3fscurl, void* arg);
void* get_object_req_threadworker(S3fsCurl& s3fscurl, void* arg);
//-------------------------------------------------------------------
// Utility functions
//-------------------------------------------------------------------
int head_request(const std::string& strpath, headers_t& header);
int multi_head_request(const std::string& strpath, SyncFiller& syncfiller, std::mutex& thparam_lock, int& retrycount, s3obj_list_t& notfound_list, bool use_wtf8, objtype_t objtype, int& result, Semaphore& sem);
int delete_request(const std::string& strpath);
int put_head_request(const std::string& strpath, const headers_t& meta, bool is_copy);
int put_request(const std::string& strpath, const headers_t& meta, int fd, bool ahbe);
int list_bucket_request(const std::string& strpath, const std::string& query, std::string& responseBody);
int check_service_request(const std::string& strpath, bool forceNoSSE, bool support_compat_dir, long& responseCode, std::string& responseBody);
int pre_multipart_upload_request(const std::string& path, const headers_t& meta, std::string& upload_id);
int multipart_upload_part_request(const std::string& path, int upload_fd, off_t start, off_t size, int part_num, const std::string& upload_id, etagpair* petag, bool is_copy, Semaphore* psem, std::mutex* pthparam_lock, int* req_result);
int await_multipart_upload_part_request(const std::string& path, int upload_fd, off_t start, off_t size, int part_num, const std::string& upload_id, etagpair* petag, bool is_copy);
int multipart_upload_request(const std::string& path, const headers_t& meta, int upload_fd);
int mix_multipart_upload_request(const std::string& path, headers_t& meta, int upload_fd, const fdpage_list_t& mixuppages);
int complete_multipart_upload_request(const std::string& path, const std::string& upload_id, const etaglist_t& parts);
int abort_multipart_upload_request(const std::string& path, const std::string& upload_id);
int multipart_put_head_request(const std::string& strfrom, const std::string& strto, off_t size, const headers_t& meta);
int parallel_get_object_request(const std::string& path, int fd, off_t start, off_t size);
int get_object_request(const std::string& path, int fd, off_t start, off_t size);
//-------------------------------------------------------------------
// Direct Call Utility Functions
//-------------------------------------------------------------------
int get_iamv2api_token_request(const std::string& strurl, int tokenttl, const std::string& strttlhdr, std::string& token);
int get_iamrole_request(const std::string& strurl, const std::string& striamtoken, std::string& token);
int get_iamcred_request(const std::string& strurl, const std::string& striamtoken, const std::string& stribmsecret, std::string& cred);
#endif // S3FS_THREADREQS_H_
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: expandtab sw=4 ts=4 fdm=marker
* vim<600: expandtab sw=4 ts=4
*/
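Each *_thparam structure above is filled by the caller and handed to the matching *_threadworker; when several workers contribute to one outcome, they also share a mutex and a result pointer (see pthparam_lock/presult in multi_head_req_thparam and multipart_upload_part_req_thparam). The real dispatch goes through s3fs's thread pool and S3fsCurl in src/s3fs_threadreqs.cpp (suppressed above); the sketch below only illustrates that shared-result convention, with plain std::thread and a simulated failure standing in for the real requests:

#include <cerrno>
#include <cstdio>
#include <mutex>
#include <string>
#include <thread>
#include <vector>

// Illustrative stand-in (not s3fs code) for the shared-result convention used
// by structures such as multipart_upload_part_req_thparam above.
struct part_param
{
    std::string path;
    int         part_num      = -1;
    std::mutex* pthparam_lock = nullptr;
    int*        presult       = nullptr;
};

static void part_worker(part_param param)
{
    int res = (3 == param.part_num) ? -EIO : 0;      // pretend part 3 failed

    const std::lock_guard<std::mutex> lock(*param.pthparam_lock);
    if(0 == *param.presult && 0 != res){
        *param.presult = res;                        // keep only the first error
    }
}

int main()
{
    std::mutex thparam_lock;
    int        result = 0;
    std::vector<std::thread> workers;

    for(int part_num = 1; part_num <= 5; ++part_num){
        part_param param;
        param.path          = "/bucket/object";
        param.part_num      = part_num;
        param.pthparam_lock = &thparam_lock;
        param.presult       = &result;
        workers.emplace_back(part_worker, param);
    }
    for(auto& th : workers){
        th.join();
    }
    printf("aggregate result: %d\n", result);        // -EIO from the failing part
    return 0;
}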

View File

@ -24,6 +24,7 @@
#include <cerrno>
#include <grp.h>
#include <memory>
#include <mutex>
#include <pwd.h>
#include <libgen.h>
#include <dirent.h>
@ -37,7 +38,6 @@
#include "s3fs_util.h"
#include "string_util.h"
#include "s3fs_help.h"
#include "autolock.h"
//-------------------------------------------------------------------
// Global variables
@ -66,20 +66,22 @@ void init_sysconf_vars()
// there is no hard limit on the size of the buffer needed to
// store all the groups returned.
errno = 0;
long res = sysconf(_SC_GETPW_R_SIZE_MAX);
if(0 > res){
if (errno != 0){
S3FS_PRN_WARN("could not get max pw length.");
S3FS_PRN_ERR("could not get max password length.");
abort();
}
res = 1024; // default initial length
}
max_password_size = res;
errno = 0;
res = sysconf(_SC_GETGR_R_SIZE_MAX);
if(0 > res) {
if (errno != 0) {
S3FS_PRN_ERR("could not get max name length.");
S3FS_PRN_ERR("could not get max group name length.");
abort();
}
res = 1024; // default initial length
@ -99,11 +101,11 @@ std::string get_username(uid_t uid)
struct passwd* ppwinfo = nullptr;
// make buffer
std::unique_ptr<char[]> pbuf(new char[maxlen]);
auto pbuf = std::make_unique<char[]>(maxlen);
// get pw information
while(ERANGE == (result = getpwuid_r(uid, &pwinfo, pbuf.get(), maxlen, &ppwinfo))){
maxlen *= 2;
pbuf.reset(new char[maxlen]);
pbuf = std::make_unique<char[]>(maxlen);
}
if(0 != result){
@ -127,11 +129,11 @@ int is_uid_include_group(uid_t uid, gid_t gid)
struct group* pginfo = nullptr;
// make buffer
std::unique_ptr<char[]> pbuf(new char[maxlen]);
auto pbuf = std::make_unique<char[]>(maxlen);
// get group information
while(ERANGE == (result = getgrgid_r(gid, &ginfo, pbuf.get(), maxlen, &pginfo))){
maxlen *= 2;
pbuf.reset(new char[maxlen]);
pbuf = std::make_unique<char[]>(maxlen);
}
if(0 != result){
@ -167,52 +169,11 @@ int is_uid_include_group(uid_t uid, gid_t gid)
// conflicts.
// To avoid this, exclusive control is performed by mutex.
//
static pthread_mutex_t* pbasename_lock = nullptr;
bool init_basename_lock()
{
if(pbasename_lock){
S3FS_PRN_ERR("already initialized mutex for posix dirname/basename function.");
return false;
}
pbasename_lock = new pthread_mutex_t;
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
#if S3FS_PTHREAD_ERRORCHECK
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
int result;
if(0 != (result = pthread_mutex_init(pbasename_lock, &attr))){
S3FS_PRN_ERR("failed to init pbasename_lock: %d.", result);
delete pbasename_lock;
pbasename_lock = nullptr;
return false;
}
return true;
}
bool destroy_basename_lock()
{
if(!pbasename_lock){
S3FS_PRN_ERR("the mutex for posix dirname/basename function is not initialized.");
return false;
}
int result;
if(0 != (result = pthread_mutex_destroy(pbasename_lock))){
S3FS_PRN_ERR("failed to destroy pbasename_lock: %d", result);
return false;
}
delete pbasename_lock;
pbasename_lock = nullptr;
return true;
}
static std::mutex basename_lock;
std::string mydirname(const std::string& path)
{
AutoLock auto_lock(pbasename_lock);
const std::lock_guard<std::mutex> lock(basename_lock);
return mydirname(path.c_str());
}
@ -225,15 +186,19 @@ std::string mydirname(const char* path)
return "";
}
char *buf = strdup(path);
std::string result = dirname(buf);
free(buf);
// [TODO]
// Currently, use "&str[pos]" to make it possible to build with C++14.
// Once we support C++17 or later, we will use "str.data()".
//
std::string strPath = path;
strPath.push_back('\0'); // terminate with a null character and allocate space for it.
std::string result = dirname(&strPath[0]); // NOLINT(readability-container-data-pointer)
return result;
}
std::string mybasename(const std::string& path)
{
AutoLock auto_lock(pbasename_lock);
const std::lock_guard<std::mutex> data_lock(basename_lock);
return mybasename(path.c_str());
}
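For reference, the rewritten wrappers keep POSIX dirname(3)/basename(3) semantics: mydirname("/a/b/c") yields "/a/b" and mybasename("/a/b/c") yields "c", while for a bare "name" they yield "." and "name" respectively. The std::string copy below (with its explicit trailing NUL) exists because those libc functions may modify their argument, and basename_lock serializes the calls because they may also return pointers to static storage.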
@ -246,9 +211,13 @@ std::string mybasename(const char* path)
return "";
}
char *buf = strdup(path);
std::string result = basename(buf);
free(buf);
// [TODO]
// Currently, use "&str[pos]" to make it possible to build with C++14.
// Once we support C++17 or later, we will use "str.data()".
//
std::string strPath = path;
strPath.push_back('\0'); // terminate with a null character and allocate space for it.
std::string result = basename(&strPath[0]); // NOLINT(readability-container-data-pointer)
return result;
}
@ -444,124 +413,6 @@ void print_launch_message(int argc, char** argv)
S3FS_PRN_LAUNCH_INFO("%s", message.c_str());
}
//
// result: -1 ts1 < ts2
// 0 ts1 == ts2
// 1 ts1 > ts2
//
int compare_timespec(const struct timespec& ts1, const struct timespec& ts2)
{
if(ts1.tv_sec < ts2.tv_sec){
return -1;
}else if(ts1.tv_sec > ts2.tv_sec){
return 1;
}else{
if(ts1.tv_nsec < ts2.tv_nsec){
return -1;
}else if(ts1.tv_nsec > ts2.tv_nsec){
return 1;
}
}
return 0;
}
//
// result: -1 st < ts
// 0 st == ts
// 1 st > ts
//
int compare_timespec(const struct stat& st, stat_time_type type, const struct timespec& ts)
{
struct timespec st_ts;
set_stat_to_timespec(st, type, st_ts);
return compare_timespec(st_ts, ts);
}
void set_timespec_to_stat(struct stat& st, stat_time_type type, const struct timespec& ts)
{
if(stat_time_type::ATIME == type){
#if defined(__APPLE__)
st.st_atime = ts.tv_sec;
st.st_atimespec.tv_nsec = ts.tv_nsec;
#else
st.st_atim.tv_sec = ts.tv_sec;
st.st_atim.tv_nsec = ts.tv_nsec;
#endif
}else if(stat_time_type::MTIME == type){
#if defined(__APPLE__)
st.st_mtime = ts.tv_sec;
st.st_mtimespec.tv_nsec = ts.tv_nsec;
#else
st.st_mtim.tv_sec = ts.tv_sec;
st.st_mtim.tv_nsec = ts.tv_nsec;
#endif
}else if(stat_time_type::CTIME == type){
#if defined(__APPLE__)
st.st_ctime = ts.tv_sec;
st.st_ctimespec.tv_nsec = ts.tv_nsec;
#else
st.st_ctim.tv_sec = ts.tv_sec;
st.st_ctim.tv_nsec = ts.tv_nsec;
#endif
}else{
S3FS_PRN_ERR("unknown type(%d), so skip to set value.", static_cast<int>(type));
}
}
struct timespec* set_stat_to_timespec(const struct stat& st, stat_time_type type, struct timespec& ts)
{
if(stat_time_type::ATIME == type){
#if defined(__APPLE__)
ts.tv_sec = st.st_atime;
ts.tv_nsec = st.st_atimespec.tv_nsec;
#else
ts = st.st_atim;
#endif
}else if(stat_time_type::MTIME == type){
#if defined(__APPLE__)
ts.tv_sec = st.st_mtime;
ts.tv_nsec = st.st_mtimespec.tv_nsec;
#else
ts = st.st_mtim;
#endif
}else if(stat_time_type::CTIME == type){
#if defined(__APPLE__)
ts.tv_sec = st.st_ctime;
ts.tv_nsec = st.st_ctimespec.tv_nsec;
#else
ts = st.st_ctim;
#endif
}else{
S3FS_PRN_ERR("unknown type(%d), so use 0 as timespec.", static_cast<int>(type));
ts.tv_sec = 0;
ts.tv_nsec = 0;
}
return &ts;
}
std::string str_stat_time(const struct stat& st, stat_time_type type)
{
struct timespec ts;
return str(*set_stat_to_timespec(st, type, ts));
}
struct timespec* s3fs_realtime(struct timespec& ts)
{
if(-1 == clock_gettime(static_cast<clockid_t>(CLOCK_REALTIME), &ts)){
S3FS_PRN_WARN("failed to clock_gettime by errno(%d)", errno);
ts.tv_sec = time(nullptr);
ts.tv_nsec = 0;
}
return &ts;
}
std::string s3fs_str_realtime()
{
struct timespec ts;
return str(*s3fs_realtime(ts));
}
int s3fs_fclose(FILE* fp)
{
if(fp == nullptr){

View File

@ -21,7 +21,10 @@
#ifndef S3FS_S3FS_UTIL_H_
#define S3FS_S3FS_UTIL_H_
#include <cstdint>
#include <functional>
#include <string>
#include <sys/stat.h>
#ifndef CLOCK_REALTIME
#define CLOCK_REALTIME 0
@ -42,8 +45,6 @@ void init_sysconf_vars();
std::string get_username(uid_t uid);
int is_uid_include_group(uid_t uid, gid_t gid);
bool init_basename_lock();
bool destroy_basename_lock();
std::string mydirname(const char* path);
std::string mydirname(const std::string& path);
std::string mybasename(const char* path);
@ -58,55 +59,40 @@ bool compare_sysname(const char* target);
void print_launch_message(int argc, char** argv);
//
// Utility for nanosecond time(timespec)
//
enum class stat_time_type{
ATIME,
MTIME,
CTIME
};
//-------------------------------------------------------------------
// Utility for nanosecond time(timespec)
//-------------------------------------------------------------------
static constexpr struct timespec S3FS_OMIT_TS = {0, UTIME_OMIT};
int compare_timespec(const struct timespec& ts1, const struct timespec& ts2);
int compare_timespec(const struct stat& st, stat_time_type type, const struct timespec& ts);
void set_timespec_to_stat(struct stat& st, stat_time_type type, const struct timespec& ts);
struct timespec* set_stat_to_timespec(const struct stat& st, stat_time_type type, struct timespec& ts);
std::string str_stat_time(const struct stat& st, stat_time_type type);
struct timespec* s3fs_realtime(struct timespec& ts);
std::string s3fs_str_realtime();
// Wrap fclose since it is illegal to take the address of a stdlib function
int s3fs_fclose(FILE* fp);
class scope_guard {
public:
template<class Callable>
explicit scope_guard(Callable&& undo_func)
: func(std::forward<Callable>(undo_func))
{}
class scope_guard
{
public:
template<class Callable>
~scope_guard() {
if(func != nullptr) {
func();
explicit scope_guard(Callable&& undo_func)
: func(std::forward<Callable>(undo_func))
{}
~scope_guard()
{
if(func != nullptr) {
func();
}
}
}
void dismiss() {
func = nullptr;
}
void dismiss()
{
func = nullptr;
}
scope_guard(const scope_guard&) = delete;
scope_guard(scope_guard&& other) = delete;
scope_guard& operator=(const scope_guard&) = delete;
scope_guard& operator=(scope_guard&&) = delete;
scope_guard(const scope_guard&) = delete;
scope_guard(scope_guard&& other) = delete;
scope_guard& operator=(const scope_guard&) = delete;
scope_guard& operator=(scope_guard&&) = delete;
private:
std::function<void()> func;
private:
std::function<void()> func;
};
#endif // S3FS_S3FS_UTIL_H_
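scope_guard pairs naturally with s3fs_fclose, which is declared above precisely because the address of a plain stdlib function cannot be taken portably. A minimal usage sketch, assuming the s3fs headers are on the include path (the helper function itself is hypothetical):

#include <cstdio>
#include "s3fs_util.h"   // scope_guard and s3fs_fclose from the header above

// Hypothetical helper (not in s3fs): the guard closes the file on every exit
// path; calling guard.dismiss() would skip that cleanup.
bool read_first_byte(const char* path, unsigned char& byte)
{
    FILE* fp = fopen(path, "rb");
    if(!fp){
        return false;
    }
    scope_guard guard([&]{ s3fs_fclose(fp); });

    int ch = fgetc(fp);
    if(EOF == ch){
        return false;            // guard still closes fp here
    }
    byte = static_cast<unsigned char>(ch);
    return true;                 // and here as well
}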

View File

@ -21,25 +21,32 @@
#include <cstdio>
#include <cstdlib>
#include <libxml/xpathInternals.h>
#include <mutex>
#include <string>
#include "common.h"
#include "s3fs.h"
#include "s3fs_logger.h"
#include "s3fs_xml.h"
#include "s3fs_util.h"
#include "s3objlist.h"
#include "autolock.h"
#include "string_util.h"
//-------------------------------------------------------------------
// Symbols
//-------------------------------------------------------------------
enum class get_object_name_result : std::uint8_t {
SUCCESS,
FAILURE,
FILE_OR_SUBDIR_IN_DIR
};
//-------------------------------------------------------------------
// Variables
//-------------------------------------------------------------------
static constexpr char c_strErrorObjectName[] = "FILE or SUBDIR in DIR";
// [NOTE]
// mutex for static variables in GetXmlNsUrl
//
static pthread_mutex_t* pxml_parser_mutex = nullptr;
static std::mutex xml_parser_mutex;
//-------------------------------------------------------------------
// Functions
@ -48,7 +55,7 @@ static bool GetXmlNsUrl(xmlDocPtr doc, std::string& nsurl)
{
bool result = false;
if(!pxml_parser_mutex || !doc){
if(!doc){
return result;
}
@ -57,7 +64,7 @@ static bool GetXmlNsUrl(xmlDocPtr doc, std::string& nsurl)
static time_t tmLast = 0; // cache for 60 sec.
static std::string strNs;
AutoLock lock(pxml_parser_mutex);
const std::lock_guard<std::mutex> lock(xml_parser_mutex);
if((tmLast + 60) < time(nullptr)){
// refresh
@ -65,15 +72,14 @@ static bool GetXmlNsUrl(xmlDocPtr doc, std::string& nsurl)
strNs = "";
xmlNodePtr pRootNode = xmlDocGetRootElement(doc);
if(pRootNode){
xmlNsPtr* nslist = xmlGetNsList(doc, pRootNode);
std::unique_ptr<xmlNsPtr, decltype(xmlFree)> nslist(xmlGetNsList(doc, pRootNode), xmlFree);
if(nslist){
if(nslist[0] && nslist[0]->href){
int len = xmlStrlen(nslist[0]->href);
if(*nslist && (*nslist)[0].href){
int len = xmlStrlen((*nslist)[0].href);
if(0 < len){
strNs = std::string(reinterpret_cast<const char*>(nslist[0]->href), len);
strNs = std::string(reinterpret_cast<const char*>((*nslist)[0].href), len);
}
}
S3FS_XMLFREE(nslist);
}
}
}
@ -134,20 +140,17 @@ unique_ptr_xmlChar get_next_marker(xmlDocPtr doc)
return get_base_exp(doc, "NextMarker");
}
// return: the pointer to object name on allocated memory.
// the pointer to "c_strErrorObjectName".(not allocated)
// nullptr(a case of something error occurred)
static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path)
static std::pair<get_object_name_result, std::string> get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path)
{
// Get full path
unique_ptr_xmlChar fullpath(xmlNodeListGetString(doc, node, 1), xmlFree);
if(!fullpath){
S3FS_PRN_ERR("could not get object full path name..");
return nullptr;
return {get_object_name_result::FAILURE, ""};
}
// basepath(path) is as same as fullpath.
if(0 == strcmp(reinterpret_cast<char*>(fullpath.get()), path)){
return const_cast<char*>(c_strErrorObjectName);
return {get_object_name_result::FILE_OR_SUBDIR_IN_DIR, ""};
}
// Make dir path and filename
@ -158,31 +161,31 @@ static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path)
const char* basepath= (path && '/' == path[0]) ? &path[1] : path;
if('\0' == mybname[0]){
return nullptr;
return {get_object_name_result::FAILURE, ""};
}
// check subdir & file in subdir
if(0 < strlen(dirpath)){
// case of "/"
if(0 == strcmp(mybname, "/") && 0 == strcmp(dirpath, "/")){
return const_cast<char*>(c_strErrorObjectName);
return {get_object_name_result::FILE_OR_SUBDIR_IN_DIR, ""};
}
// case of "."
if(0 == strcmp(mybname, ".") && 0 == strcmp(dirpath, ".")){
return const_cast<char *>(c_strErrorObjectName);
return {get_object_name_result::FILE_OR_SUBDIR_IN_DIR, ""};
}
// case of ".."
if(0 == strcmp(mybname, "..") && 0 == strcmp(dirpath, ".")){
return const_cast<char *>(c_strErrorObjectName);
return {get_object_name_result::FILE_OR_SUBDIR_IN_DIR, ""};
}
// case of "name"
if(0 == strcmp(dirpath, ".")){
// OK
return strdup(mybname);
return {get_object_name_result::SUCCESS, mybname};
}else{
if(basepath && 0 == strcmp(dirpath, basepath)){
// OK
return strdup(mybname);
return {get_object_name_result::SUCCESS, mybname};
}else if(basepath && 0 < strlen(basepath) && '/' == basepath[strlen(basepath) - 1] && 0 == strncmp(dirpath, basepath, strlen(basepath) - 1)){
std::string withdirname;
if(strlen(dirpath) > strlen(basepath)){
@ -194,12 +197,12 @@ static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path)
withdirname += "/";
}
withdirname += mybname;
return strdup(withdirname.c_str());
return {get_object_name_result::SUCCESS, withdirname};
}
}
}
// case of something wrong
return const_cast<char*>(c_strErrorObjectName);
return {get_object_name_result::FILE_OR_SUBDIR_IN_DIR, ""};
}
static unique_ptr_xmlChar get_exp_value_xml(xmlDocPtr doc, xmlXPathContextPtr ctx, const char* exp_key)
@ -348,12 +351,13 @@ int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextP
continue;
}
xmlNodeSetPtr key_nodes = key->nodesetval;
char* name = get_object_name(doc, key_nodes->nodeTab[0]->xmlChildrenNode, path);
auto result = get_object_name(doc, key_nodes->nodeTab[0]->xmlChildrenNode, path);
if(!name){
switch(result.first){
case get_object_name_result::FAILURE:
S3FS_PRN_WARN("name is something wrong. but continue.");
}else if(reinterpret_cast<const char*>(name) != c_strErrorObjectName){
break;
case get_object_name_result::SUCCESS: {
is_dir = isCPrefix ? true : false;
stretag = "";
@ -377,18 +381,20 @@ int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextP
// The XML data passed to this function is CR code(\r) encoded.
// The function below decodes that encoded CR code.
//
std::string decname = get_decoded_cr_code(name);
free(name);
std::string decname = get_decoded_cr_code(result.second.c_str());
if(prefix){
head.common_prefixes.push_back(decname);
head.AddCommonPrefix(decname);
}
if(!head.insert(decname.c_str(), (!stretag.empty() ? stretag.c_str() : nullptr), is_dir)){
S3FS_PRN_ERR("insert_object returns with error.");
return -1;
}
}else{
break;
}
case get_object_name_result::FILE_OR_SUBDIR_IN_DIR:
S3FS_PRN_DBG("name is file or subdir in dir. but continue.");
break;
}
}
@ -445,13 +451,26 @@ bool simple_parse_xml(const char* data, size_t len, const char* key, std::string
{
bool result = false;
if(!data || !key){
if(!data || !key || 0 == len){
return false;
}
value.clear();
// [NOTE]
// If data is not nullptr and len is 0, this function will output the message
// ":1: parser error : Document is empty" to stderr.
// Make sure len is not 0 beforehand.
//
s3fsXmlBufferParserError parserError;
parserError.SetXmlParseError();
std::unique_ptr<xmlDoc, decltype(&xmlFreeDoc)> doc(xmlReadMemory(data, static_cast<int>(len), "", nullptr, 0), xmlFreeDoc);
if(nullptr == doc){
if(parserError.IsXmlParseError()){
S3FS_PRN_ERR("xmlReadMemory returns with error: %s", parserError.GetXmlParseError().c_str());
}else{
S3FS_PRN_ERR("xmlReadMemory returns with error.");
}
return false;
}
@ -483,44 +502,6 @@ bool simple_parse_xml(const char* data, size_t len, const char* key, std::string
return result;
}
//-------------------------------------------------------------------
// Utility for lock
//-------------------------------------------------------------------
bool init_parser_xml_lock()
{
if(pxml_parser_mutex){
return false;
}
pxml_parser_mutex = new pthread_mutex_t;
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
#if S3FS_PTHREAD_ERRORCHECK
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
if(0 != pthread_mutex_init(pxml_parser_mutex, &attr)){
delete pxml_parser_mutex;
pxml_parser_mutex = nullptr;
return false;
}
return true;
}
bool destroy_parser_xml_lock()
{
if(!pxml_parser_mutex){
return false;
}
if(0 != pthread_mutex_destroy(pxml_parser_mutex)){
return false;
}
delete pxml_parser_mutex;
pxml_parser_mutex = nullptr;
return true;
}
/*
* Local variables:
* tab-width: 4

View File

@ -22,9 +22,11 @@
#define S3FS_S3FS_XML_H_
#include <libxml/xpath.h>
#include <libxml/parser.h> // [NOTE] nessetially include this header in some environments
#include <libxml/parser.h> // [NOTE] include this header in some environments
#include <memory>
#include <string>
#include <array>
#include <cstring>
#include "mpu_util.h"
@ -35,6 +37,44 @@ typedef std::unique_ptr<xmlXPathObject, decltype(&xmlXPathFreeObject)> unique_pt
typedef std::unique_ptr<xmlXPathContext, decltype(&xmlXPathFreeContext)> unique_ptr_xmlXPathContext;
typedef std::unique_ptr<xmlDoc, decltype(&xmlFreeDoc)> unique_ptr_xmlDoc;
//-------------------------------------------------------------------
// Utility Class
//-------------------------------------------------------------------
class s3fsXmlBufferParserError
{
private:
static constexpr int ERROR_BUFFER_SIZE = 1024;
std::array<char, ERROR_BUFFER_SIZE> error_buffer{};
static void ParserErrorHandler(void* ctx, const char *msg, ...)
{
auto* errbuf = static_cast<char*>(ctx);
if(errbuf){
va_list args;
va_start(args, msg);
vsnprintf(errbuf + strlen(errbuf), ERROR_BUFFER_SIZE - strlen(errbuf) - 1, msg, args);
va_end(args);
}
}
public:
void SetXmlParseError()
{
error_buffer.fill(0);
xmlSetGenericErrorFunc(error_buffer.data(), s3fsXmlBufferParserError::ParserErrorHandler);
}
std::string GetXmlParseError() const
{
return strlen(error_buffer.data()) ? error_buffer.data() : "";
}
bool IsXmlParseError() const
{
return (0 < strlen(error_buffer.data()));
}
};
//-------------------------------------------------------------------
// Functions
//-------------------------------------------------------------------
@ -47,9 +87,6 @@ bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list);
bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value);
bool init_parser_xml_lock();
bool destroy_parser_xml_lock();
#endif // S3FS_S3FS_XML_H_
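s3fsXmlBufferParserError captures libxml2's generic error output into a fixed buffer so that callers such as simple_parse_xml (declared just above) can log a meaningful message instead of letting the parser write to stderr. A hedged usage sketch, assuming libxml2 and the s3fs headers are available; the UploadId key is only an example:

#include <string>
#include "s3fs_xml.h"    // s3fsXmlBufferParserError and simple_parse_xml from this header

// Illustrative only: pull <UploadId> out of a small response body. The error
// capturing happens inside simple_parse_xml(), which now also rejects empty
// input, so the caller only needs to check the boolean result.
static bool extract_upload_id(const std::string& body, std::string& upload_id)
{
    return simple_parse_xml(body.c_str(), body.size(), "UploadId", upload_id);
}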
/*

View File

@ -20,6 +20,7 @@
#include <cstdio>
#include <cstring>
#include <string>
#include "s3objlist.h"
@ -31,7 +32,11 @@
//
// If name is terminated by "/", it is forced dir type.
// If name is terminated by "_$folder$", it is forced dir type.
// If is_dir is true and name is not terminated by "/", the name is added "/".
// If is_dir is true, or the name ends with "/" or "_$folder$", the object is
// determined to be a directory.
// For a directory, one of the directory types in objtype_t is set in type.
// If it is not a directory (a file or symbolic link), type is set to
// objtype_t::UNKNOWN.
//
bool S3ObjList::insert(const char* name, const char* etag, bool is_dir)
{
@ -42,55 +47,59 @@ bool S3ObjList::insert(const char* name, const char* etag, bool is_dir)
s3obj_t::iterator iter;
std::string newname;
std::string orgname = name;
objtype_t type = objtype_t::UNKNOWN;
// Normalization
std::string::size_type pos = orgname.find("_$folder$");
if(std::string::npos != pos){
newname = orgname.substr(0, pos);
is_dir = true;
type = objtype_t::DIR_FOLDER_SUFFIX;
}else{
newname = orgname;
}
if(is_dir){
if('/' != *newname.rbegin()){
newname += "/";
if('/' == *newname.rbegin()){
if(!IS_DIR_OBJ(type)){
type = objtype_t::DIR_NORMAL;
}
}else{
if('/' == *newname.rbegin()){
is_dir = true;
if(is_dir || IS_DIR_OBJ(type)){
newname += "/";
if(!IS_DIR_OBJ(type)){
type = objtype_t::DIR_NOT_TERMINATE_SLASH;
}
}
}
// Check derived name object.
if(is_dir){
if(is_dir || IS_DIR_OBJ(type)){
std::string chkname = newname.substr(0, newname.length() - 1);
if(objects.end() != (iter = objects.find(chkname))){
if(objects.cend() != (iter = objects.find(chkname))){
// found "dir" object --> remove it.
objects.erase(iter);
}
}else{
std::string chkname = newname + "/";
if(objects.end() != (iter = objects.find(chkname))){
if(objects.cend() != (iter = objects.find(chkname))){
// found "dir/" object --> not add new object.
// and add normalization
return insert_normalized(orgname.c_str(), chkname.c_str(), true);
return insert_normalized(orgname.c_str(), chkname.c_str(), type);
}
}
// Add object
if(objects.end() != (iter = objects.find(newname))){
if(objects.cend() != (iter = objects.find(newname))){
// Found same object --> update information.
(*iter).second.normalname.erase();
(*iter).second.orgname = orgname;
(*iter).second.is_dir = is_dir;
iter->second.normalname.clear();
iter->second.orgname = orgname;
iter->second.type = type;
if(etag){
(*iter).second.etag = etag; // over write
iter->second.etag = etag; // over write
}
}else{
// add new object
s3obj_entry newobject;
newobject.orgname = orgname;
newobject.is_dir = is_dir;
newobject.type = type;
if(etag){
newobject.etag = etag;
}
@ -98,10 +107,10 @@ bool S3ObjList::insert(const char* name, const char* etag, bool is_dir)
}
// add normalization
return insert_normalized(orgname.c_str(), newname.c_str(), is_dir);
return insert_normalized(orgname.c_str(), newname.c_str(), type);
}
bool S3ObjList::insert_normalized(const char* name, const char* normalized, bool is_dir)
bool S3ObjList::insert_normalized(const char* name, const char* normalized, objtype_t type)
{
if(!name || '\0' == name[0] || !normalized || '\0' == normalized[0]){
return false;
@ -111,17 +120,17 @@ bool S3ObjList::insert_normalized(const char* name, const char* normalized, bool
}
s3obj_t::iterator iter;
if(objects.end() != (iter = objects.find(name))){
if(objects.cend() != (iter = objects.find(name))){
// found name --> over write
iter->second.orgname.erase();
iter->second.etag.erase();
iter->second.orgname.clear();
iter->second.etag.clear();
iter->second.normalname = normalized;
iter->second.is_dir = is_dir;
iter->second.type = type;
}else{
// not found --> add new object
s3obj_entry newobject;
newobject.normalname = normalized;
newobject.is_dir = is_dir;
newobject.type = type;
objects[name] = newobject;
}
return true;
@ -134,10 +143,10 @@ const s3obj_entry* S3ObjList::GetS3Obj(const char* name) const
if(!name || '\0' == name[0]){
return nullptr;
}
if(objects.end() == (iter = objects.find(name))){
if(objects.cend() == (iter = objects.find(name))){
return nullptr;
}
return &((*iter).second);
return &(iter->second);
}
std::string S3ObjList::GetOrgName(const char* name) const
@ -189,22 +198,22 @@ bool S3ObjList::IsDir(const char* name) const
if(nullptr == (ps3obj = GetS3Obj(name))){
return false;
}
return ps3obj->is_dir;
return IS_DIR_OBJ(ps3obj->type);
}
bool S3ObjList::GetLastName(std::string& lastname) const
{
bool result = false;
lastname = "";
for(s3obj_t::const_iterator iter = objects.begin(); iter != objects.end(); ++iter){
if((*iter).second.orgname.length()){
for(auto iter = objects.cbegin(); iter != objects.cend(); ++iter){
if(!iter->second.orgname.empty()){
if(lastname.compare(iter->second.orgname) < 0){
lastname = (*iter).second.orgname;
lastname = iter->second.orgname;
result = true;
}
}else{
if(lastname.compare(iter->second.normalname) < 0){
lastname = (*iter).second.normalname;
lastname = iter->second.normalname;
result = true;
}
}
@ -212,33 +221,47 @@ bool S3ObjList::GetLastName(std::string& lastname) const
return result;
}
bool S3ObjList::GetNameList(s3obj_list_t& list, bool OnlyNormalized, bool CutSlash) const
bool S3ObjList::RawGetNames(s3obj_list_t* plist, s3obj_type_map_t* pobjmap, bool OnlyNormalized, bool CutSlash) const
{
s3obj_t::const_iterator iter;
for(iter = objects.begin(); objects.end() != iter; ++iter){
if(!plist && !pobjmap){
return false;
}
for(auto iter = objects.cbegin(); objects.cend() != iter; ++iter){
if(OnlyNormalized && !iter->second.normalname.empty()){
continue;
}
std::string name = (*iter).first;
std::string name = iter->first;
if(CutSlash && 1 < name.length() && '/' == *name.rbegin()){
// only "/" std::string is skipped this.
name.erase(name.length() - 1);
}
list.push_back(name);
if(plist){
plist->push_back(name);
}
if(pobjmap){
(*pobjmap)[name] = iter->second.type;
}
}
return true;
}
bool S3ObjList::GetNameList(s3obj_list_t& list, bool OnlyNormalized, bool CutSlash) const
{
return RawGetNames(&list, nullptr, OnlyNormalized, CutSlash);
}
bool S3ObjList::GetNameMap(s3obj_type_map_t& objmap, bool OnlyNormalized, bool CutSlash) const
{
return RawGetNames(nullptr, &objmap, OnlyNormalized, CutSlash);
}
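GetNameList and GetNameMap are now thin wrappers over RawGetNames; the map variant additionally reports each entry's objtype_t. A brief sketch of how a caller might consume the map (the loop body is illustrative, not from the diff; STR_OBJTYPE comes from types.h, shown later in this diff):

    #include <cstdio>
    #include "s3objlist.h"

    static void dump_object_types(const S3ObjList& head)
    {
        s3obj_type_map_t objmap;
        if(!head.GetNameMap(objmap, /*OnlyNormalized=*/true, /*CutSlash=*/true)){
            return;
        }
        for(const auto& pair : objmap){
            // pair.first is the (possibly slash-trimmed) name, pair.second its objtype_t
            std::printf("%s : %s\n", pair.first.c_str(), STR_OBJTYPE(pair.second).c_str());
        }
    }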
typedef std::map<std::string, bool> s3obj_h_t;
bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash)
{
s3obj_h_t h_map;
s3obj_h_t::iterator hiter;
s3obj_list_t::const_iterator liter;
for(liter = list.begin(); list.end() != liter; ++liter){
for(auto liter = list.cbegin(); list.cend() != liter; ++liter){
std::string strtmp = (*liter);
if(1 < strtmp.length() && '/' == *strtmp.rbegin()){
strtmp.erase(strtmp.length() - 1);
@ -251,7 +274,7 @@ bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash)
if(strtmp.empty() || "/" == strtmp){
break;
}
if(h_map.end() == h_map.find(strtmp)){
if(h_map.cend() == h_map.find(strtmp)){
// not found
h_map[strtmp] = false;
}
@ -259,7 +282,7 @@ bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash)
}
// check map and add lost hierarchized directory.
for(hiter = h_map.begin(); hiter != h_map.end(); ++hiter){
for(auto hiter = h_map.cbegin(); hiter != h_map.cend(); ++hiter){
if(false == (*hiter).second){
// add hierarchized directory.
std::string strtmp = (*hiter).first;

View File

@ -23,22 +23,24 @@
#include <map>
#include <string>
#include <utility>
#include <vector>
#include "types.h"
//-------------------------------------------------------------------
// Structure / Typedef
//-------------------------------------------------------------------
struct s3obj_entry{
std::string normalname; // normalized name: if empty, object is normalized name.
std::string orgname; // original name: if empty, object is original name.
std::string normalname; // normalized name: if empty, object is normalized name.
std::string orgname; // original name: if empty, object is original name.
std::string etag;
bool is_dir;
s3obj_entry() : is_dir(false) {}
objtype_t type = objtype_t::UNKNOWN; // only set for directories, UNKNOWN for non-directories.
};
typedef std::map<std::string, struct s3obj_entry> s3obj_t;
typedef std::vector<std::string> s3obj_list_t;
typedef std::map<std::string, objtype_t> s3obj_type_map_t;
//-------------------------------------------------------------------
// Class S3ObjList
@ -47,27 +49,26 @@ class S3ObjList
{
private:
s3obj_t objects;
public:
std::vector<std::string> common_prefixes;
private:
bool insert_normalized(const char* name, const char* normalized, bool is_dir);
bool insert_normalized(const char* name, const char* normalized, objtype_t type);
const s3obj_entry* GetS3Obj(const char* name) const;
bool RawGetNames(s3obj_list_t* plist, s3obj_type_map_t* pobjmap, bool OnlyNormalized, bool CutSlash) const;
s3obj_t::const_iterator begin() const { return objects.begin(); }
s3obj_t::const_iterator end() const { return objects.end(); }
s3obj_t::const_iterator cbegin() const { return objects.cbegin(); }
s3obj_t::const_iterator cend() const { return objects.cend(); }
public:
S3ObjList() {}
~S3ObjList() {}
bool IsEmpty() const { return objects.empty(); }
bool insert(const char* name, const char* etag = nullptr, bool is_dir = false);
std::string GetOrgName(const char* name) const;
std::string GetNormalizedName(const char* name) const;
std::string GetETag(const char* name) const;
const std::vector<std::string>& GetCommonPrefixes() const { return common_prefixes; }
void AddCommonPrefix(std::string prefix) { common_prefixes.push_back(std::move(prefix)); }
bool IsDir(const char* name) const;
bool GetNameList(s3obj_list_t& list, bool OnlyNormalized = true, bool CutSlash = true) const;
bool GetNameMap(s3obj_type_map_t& objmap, bool OnlyNormalized = true, bool CutSlash = true) const;
bool GetLastName(std::string& lastname) const;
static bool MakeHierarchizedList(s3obj_list_t& list, bool haveSlash);

View File

@ -20,8 +20,11 @@
#include <cstdio>
#include <csignal>
#include <pthread.h>
#include <memory>
#include <thread>
#include <utility>
#include "psemaphore.h"
#include "s3fs_logger.h"
#include "sighandlers.h"
#include "fdcache.h"
@ -38,7 +41,7 @@ bool S3fsSignals::enableUsr1 = false;
bool S3fsSignals::Initialize()
{
if(!S3fsSignals::pSingleton){
S3fsSignals::pSingleton.reset(new S3fsSignals);
S3fsSignals::pSingleton = std::make_unique<S3fsSignals>();
}
return true;
}
@ -86,25 +89,24 @@ bool S3fsSignals::SetUsr1Handler(const char* path)
return true;
}
void* S3fsSignals::CheckCacheWorker(void* arg)
void S3fsSignals::CheckCacheWorker(Semaphore* pSem)
{
Semaphore* pSem = static_cast<Semaphore*>(arg);
if(!pSem){
pthread_exit(nullptr);
return;
}
if(!S3fsSignals::enableUsr1){
pthread_exit(nullptr);
return;
}
// wait and loop
while(S3fsSignals::enableUsr1){
// wait
pSem->wait();
pSem->acquire();
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownConditionTrueFalse
if(!S3fsSignals::enableUsr1){
break; // assap
break; // asap
}
// check all cache
@ -113,11 +115,8 @@ void* S3fsSignals::CheckCacheWorker(void* arg)
}
// do not allow request queuing
for(int value = pSem->get_value(); 0 < value; value = pSem->get_value()){
pSem->wait();
}
while(pSem->try_acquire());
}
return nullptr;
}
void S3fsSignals::HandlerUSR2(int sig)
@ -131,9 +130,7 @@ void S3fsSignals::HandlerUSR2(int sig)
bool S3fsSignals::InitUsr2Handler()
{
struct sigaction sa;
memset(&sa, 0, sizeof(struct sigaction));
struct sigaction sa{};
sa.sa_handler = S3fsSignals::HandlerUSR2;
sa.sa_flags = SA_RESTART;
if(0 != sigaction(SIGUSR2, &sa, nullptr)){
@ -153,9 +150,7 @@ void S3fsSignals::HandlerHUP(int sig)
bool S3fsSignals::InitHupHandler()
{
struct sigaction sa;
memset(&sa, 0, sizeof(struct sigaction));
struct sigaction sa{};
sa.sa_handler = S3fsSignals::HandlerHUP;
sa.sa_flags = SA_RESTART;
if(0 != sigaction(SIGHUP, &sa, nullptr)){
@ -199,19 +194,12 @@ bool S3fsSignals::InitUsr1Handler()
}
// create thread
int result;
std::unique_ptr<Semaphore> pSemUsr1_tmp(new Semaphore(0));
std::unique_ptr<pthread_t> pThreadUsr1_tmp(new pthread_t);
if(0 != (result = pthread_create(pThreadUsr1.get(), nullptr, S3fsSignals::CheckCacheWorker, static_cast<void*>(pSemUsr1_tmp.get())))){
S3FS_PRN_ERR("Could not create thread for SIGUSR1 by %d", result);
return false;
}
auto pSemUsr1_tmp = std::make_unique<Semaphore>(0);
pThreadUsr1 = std::make_unique<std::thread>(S3fsSignals::CheckCacheWorker, pSemUsr1_tmp.get());
pSemUsr1 = std::move(pSemUsr1_tmp);
pThreadUsr1 = std::move(pThreadUsr1_tmp);
// set handler
struct sigaction sa;
memset(&sa, 0, sizeof(struct sigaction));
struct sigaction sa{};
sa.sa_handler = S3fsSignals::HandlerUSR1;
sa.sa_flags = SA_RESTART;
if(0 != sigaction(SIGUSR1, &sa, nullptr)){
@ -232,15 +220,10 @@ bool S3fsSignals::DestroyUsr1Handler()
S3fsSignals::enableUsr1 = false;
// wakeup thread
pSemUsr1->post();
pSemUsr1->release();
// wait for thread exiting
void* retval = nullptr;
int result;
if(0 != (result = pthread_join(*pThreadUsr1, &retval))){
S3FS_PRN_ERR("Could not stop thread for SIGUSR1 by %d", result);
return false;
}
pThreadUsr1->join();
pSemUsr1.reset();
pThreadUsr1.reset();
@ -253,7 +236,7 @@ bool S3fsSignals::WakeupUsr1Thread()
S3FS_PRN_ERR("The thread for SIGUSR1 is not setup.");
return false;
}
pSemUsr1->post();
pSemUsr1->release();
return true;
}
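The SIGUSR1 cache-check worker now uses std::thread together with the project's Semaphore wrapper instead of raw pthreads. A condensed sketch of the start/wake/join pattern, assuming Semaphore exposes acquire()/release()/try_acquire() as used above (names outside the diff are illustrative):

    #include <memory>
    #include <thread>
    #include "psemaphore.h"

    // Illustrative worker: block until released, then exit.
    static void worker(Semaphore* psem)
    {
        psem->acquire();
    }

    static void start_and_stop()
    {
        auto psem    = std::make_unique<Semaphore>(0);
        auto pthread = std::make_unique<std::thread>(worker, psem.get());

        psem->release();    // wake the worker
        pthread->join();    // wait for it to finish
    }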

View File

@ -22,8 +22,9 @@
#define S3FS_SIGHANDLERS_H_
#include <memory>
#include <thread>
class Semaphore;
#include "psemaphore.h"
//----------------------------------------------
// class S3fsSignals
@ -34,14 +35,14 @@ class S3fsSignals
static std::unique_ptr<S3fsSignals> pSingleton;
static bool enableUsr1;
std::unique_ptr<pthread_t> pThreadUsr1;
std::unique_ptr<std::thread> pThreadUsr1;
std::unique_ptr<Semaphore> pSemUsr1;
protected:
static S3fsSignals* get() { return pSingleton.get(); }
static void HandlerUSR1(int sig);
static void* CheckCacheWorker(void* arg);
static void CheckCacheWorker(Semaphore* pSem);
static void HandlerUSR2(int sig);
static bool InitUsr2Handler();
@ -49,18 +50,18 @@ class S3fsSignals
static void HandlerHUP(int sig);
static bool InitHupHandler();
S3fsSignals();
S3fsSignals(const S3fsSignals&) = delete;
S3fsSignals(S3fsSignals&&) = delete;
S3fsSignals& operator=(const S3fsSignals&) = delete;
S3fsSignals& operator=(S3fsSignals&&) = delete;
bool InitUsr1Handler();
bool DestroyUsr1Handler();
bool WakeupUsr1Thread();
public:
S3fsSignals();
~S3fsSignals();
S3fsSignals(const S3fsSignals&) = delete;
S3fsSignals(S3fsSignals&&) = delete;
S3fsSignals& operator=(const S3fsSignals&) = delete;
S3fsSignals& operator=(S3fsSignals&&) = delete;
static bool Initialize();
static bool Destroy();

View File

@ -18,52 +18,53 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <cerrno>
#include <climits>
#include <iomanip>
#include <sstream>
#include <string>
#include <utility>
#include <fcntl.h>
#include <sys/stat.h>
#include "s3fs_logger.h"
#include "string_util.h"
//-------------------------------------------------------------------
// Global variables
//-------------------------------------------------------------------
//-------------------------------------------------------------------
// Functions
//-------------------------------------------------------------------
std::string str(const struct timespec value)
std::string str(const struct timespec& value)
{
std::ostringstream s;
s << value.tv_sec;
if(value.tv_nsec != 0){
s << "." << std::setfill('0') << std::setw(9) << value.tv_nsec;
if(UTIME_OMIT == value.tv_nsec){
s << "UTIME_OMIT";
}else if(UTIME_NOW == value.tv_nsec){
s << "UTIME_NOW";
}else{
s << value.tv_sec;
if(value.tv_nsec != 0){
s << "." << std::setfill('0') << std::setw(9) << value.tv_nsec;
}
}
return s.str();
}
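With this change, str() prints the UTIME_OMIT and UTIME_NOW sentinels by name instead of as raw numbers. A few expected outputs as a sketch, assuming the usual <fcntl.h> sentinel definitions (the sample values are illustrative):

    #include <fcntl.h>      // UTIME_OMIT, UTIME_NOW
    #include <cassert>
    #include "string_util.h"

    static void str_timespec_examples()
    {
        assert(str(timespec{1700000000, 0})     == "1700000000");
        assert(str(timespec{1700000000, 5000})  == "1700000000.000005000");  // nanoseconds zero-padded to 9 digits
        assert(str(timespec{0, UTIME_OMIT})     == "UTIME_OMIT");
        assert(str(timespec{0, UTIME_NOW})      == "UTIME_NOW");
    }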
#ifdef __MSYS__
/*
* Polyfill for strptime function
*
* This source code is from https://gist.github.com/jeremyfromearth/5694aa3a66714254752179ecf3c95582 .
*/
char* strptime(const char* s, const char* f, struct tm* tm)
// This source code is from https://gist.github.com/jeremyfromearth/5694aa3a66714254752179ecf3c95582 .
const char* s3fs_strptime(const char* s, const char* f, struct tm* tm)
{
std::istringstream input(s);
// TODO: call to setlocale required?
input.imbue(std::locale(setlocale(LC_ALL, nullptr)));
input >> std::get_time(tm, f);
if (input.fail()) {
return nullptr;
}
return (char*)(s + input.tellg());
return s + input.tellg();
}
#endif
bool s3fs_strtoofft(off_t* value, const char* str, int base)
{
@ -97,10 +98,13 @@ off_t cvt_strtoofft(const char* str, int base)
std::string lower(std::string s)
{
// change each character of the std::string to lower case
for(size_t i = 0; i < s.length(); i++){
s[i] = tolower(s[i]);
}
std::transform(s.cbegin(), s.cend(), s.begin(), ::tolower);
return s;
}
std::string upper(std::string s)
{
std::transform(s.cbegin(), s.cend(), s.begin(), ::toupper);
return s;
}
@ -124,12 +128,14 @@ std::string trim(std::string s, const char *t /* = SPACES */)
return trim_left(trim_right(std::move(s), t), t);
}
std::string peeloff(const std::string& s)
std::string peeloff(std::string s)
{
if(s.size() < 2 || *s.begin() != '"' || *s.rbegin() != '"'){
if(s.size() < 2 || *s.cbegin() != '"' || *s.rbegin() != '"'){
return s;
}
return s.substr(1, s.size() - 2);
s.erase(s.size() - 1);
s.erase(0, 1);
return s;
}
//
@ -296,7 +302,7 @@ bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime)
}
struct tm tm;
const char* prest = strptime(pdate, "%Y-%m-%dT%T", &tm);
const char* prest = s3fs_strptime(pdate, "%Y-%m-%dT%T", &tm);
if(prest == pdate){
// wrong format
return false;
@ -402,25 +408,23 @@ std::string s3fs_base64(const unsigned char* input, size_t length)
return result;
}
inline unsigned char char_decode64(const char ch)
static constexpr unsigned char char_decode64(const char ch)
{
unsigned char by;
if('A' <= ch && ch <= 'Z'){ // A - Z
by = static_cast<unsigned char>(ch - 'A');
return static_cast<unsigned char>(ch - 'A');
}else if('a' <= ch && ch <= 'z'){ // a - z
by = static_cast<unsigned char>(ch - 'a' + 26);
return static_cast<unsigned char>(ch - 'a' + 26);
}else if('0' <= ch && ch <= '9'){ // 0 - 9
by = static_cast<unsigned char>(ch - '0' + 52);
return static_cast<unsigned char>(ch - '0' + 52);
}else if('+' == ch){ // +
by = 62;
return 62;
}else if('/' == ch){ // /
by = 63;
return 63;
}else if('=' == ch){ // =
by = 64;
return 64;
}else{ // something wrong
by = UCHAR_MAX;
return UCHAR_MAX;
}
return by;
}
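Because char_decode64() is now constexpr, its mapping can be checked at compile time. A small illustration that could sit next to the function in string_util.cpp (which already includes <climits>); the static_asserts are not part of the diff:

    static_assert(char_decode64('A') == 0,         "'A' decodes to 0");
    static_assert(char_decode64('z') == 51,        "'z' decodes to 51");
    static_assert(char_decode64('9') == 61,        "'9' decodes to 61");
    static_assert(char_decode64('+') == 62,        "'+' decodes to 62");
    static_assert(char_decode64('/') == 63,        "'/' decodes to 63");
    static_assert(char_decode64('=') == 64,        "padding decodes to 64");
    static_assert(char_decode64('!') == UCHAR_MAX, "anything else is invalid");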
std::string s3fs_decode64(const char* input, size_t input_len)
@ -507,7 +511,7 @@ bool s3fs_wtf8_encode(const char *s, std::string *result)
// four byte encoding
if ((c & 0xf8) == 0xf0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80 && (s[3] & 0xc0) == 0x80) {
const unsigned code = ((c & 0x07) << 18) | ((s[1] & 0x3f) << 12) | ((s[2] & 0x3f) << 6) | (s[3] & 0x3f);
if (code >= 0x10000 && code <= 0x10ffff) {
if (code >= 0x10'000 && code <= 0x10f'fff) {
// not overlong and in defined unicode space
if (result) {
*result += c;

View File

@ -22,7 +22,9 @@
#define S3FS_STRING_UTIL_H_
#include <cstring>
#include <ctime>
#include <string>
#include <strings.h>
//
// A collection of string utilities for manipulating URLs and HTTP responses.
@ -35,7 +37,15 @@ static constexpr char SPACES[] = " \t\r\n";
//-------------------------------------------------------------------
// Inline functions
//-------------------------------------------------------------------
static inline int is_prefix(const char *str, const char *prefix) { return strncmp(str, prefix, strlen(prefix)) == 0; }
class CaseInsensitiveStringView {
public:
explicit CaseInsensitiveStringView(const std::string &str) : str(str.c_str()) {}
bool operator==(const char *other) const { return strcasecmp(str, other) == 0; }
bool is_prefix(const char *prefix) const { return strncasecmp(str, prefix, strlen(prefix)) == 0; }
private:
const char *str;
};
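CaseInsensitiveStringView wraps a std::string for case-insensitive comparison and prefix matching, which is convenient when inspecting HTTP header lines. A small hedged usage sketch (the header value is illustrative):

    #include <string>
    #include "string_util.h"

    static bool is_content_type_header(const std::string& header_line)
    {
        const CaseInsensitiveStringView view(header_line);
        // matches "Content-Type: ...", "content-type: ...", "CONTENT-TYPE: ...", etc.
        return view.is_prefix("content-type:");
    }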
static inline bool is_prefix(const char *str, const char *prefix) { return strncmp(str, prefix, strlen(prefix)) == 0; }
static inline const char* SAFESTRPTR(const char *strptr) { return strptr ? strptr : ""; }
//-------------------------------------------------------------------
@ -53,14 +63,12 @@ static inline const char* SAFESTRPTR(const char *strptr) { return strptr ? strpt
// Utilities
//-------------------------------------------------------------------
// TODO: rename to to_string?
std::string str(const struct timespec value);
std::string str(const struct timespec& value);
#ifdef __MSYS__
//
// Polyfill for strptime function.
// Cross-platform strptime
//
char* strptime(const char* s, const char* f, struct tm* tm);
#endif
const char* s3fs_strptime(const char* s, const char* f, struct tm* tm);
//
// Convert string to off_t. Returns false on bad input.
// Replacement for C++11 std::stoll.
@ -79,7 +87,8 @@ std::string trim_left(std::string s, const char *t = SPACES);
std::string trim_right(std::string s, const char *t = SPACES);
std::string trim(std::string s, const char *t = SPACES);
std::string lower(std::string s);
std::string peeloff(const std::string& s);
std::string upper(std::string s);
std::string peeloff(std::string s);
//
// Date string

View File

@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -18,54 +18,50 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <cstdio>
#include <cstdlib>
#include <cerrno>
#include "autolock.h"
#include "s3fs_logger.h"
#include "syncfiller.h"
//-------------------------------------------------------------------
// Class AutoLock
// Class SyncFiller
//-------------------------------------------------------------------
AutoLock::AutoLock(pthread_mutex_t* pmutex, Type type) : auto_mutex(pmutex)
SyncFiller::SyncFiller(void* buff, fuse_fill_dir_t filler) : filler_buff(buff), filler_func(filler)
{
if (type == ALREADY_LOCKED) {
is_lock_acquired = false;
} else if (type == NO_WAIT) {
int result = pthread_mutex_trylock(auto_mutex);
if(result == 0){
is_lock_acquired = true;
}else if(result == EBUSY){
is_lock_acquired = false;
}else{
S3FS_PRN_CRIT("pthread_mutex_trylock returned: %d", result);
abort();
}
} else {
int result = pthread_mutex_lock(auto_mutex);
if(result == 0){
is_lock_acquired = true;
}else{
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", result);
abort();
}
if(!filler_buff || !filler_func){
S3FS_PRN_CRIT("Internal error: SyncFiller constructor parameter is critical value.");
abort();
}
}
bool AutoLock::isLockAcquired() const
//
// See. prototype fuse_fill_dir_t in fuse.h
//
int SyncFiller::Fill(const std::string& name, const struct stat *stbuf, off_t off)
{
return is_lock_acquired;
const std::lock_guard<std::mutex> lock(filler_lock);
int result = 0;
if(filled.insert(name).second){
result = filler_func(filler_buff, name.c_str(), stbuf, off);
}
return result;
}
AutoLock::~AutoLock()
int SyncFiller::SufficiencyFill(const std::vector<std::string>& pathlist)
{
if (is_lock_acquired) {
int result = pthread_mutex_unlock(auto_mutex);
if(result != 0){
S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", result);
abort();
const std::lock_guard<std::mutex> lock(filler_lock);
int result = 0;
for(auto it = pathlist.cbegin(); it != pathlist.cend(); ++it) {
if(filled.insert(*it).second){
if(0 != filler_func(filler_buff, it->c_str(), nullptr, 0)){
result = 1;
}
}
}
return result;
}
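SyncFiller serializes calls to FUSE's fuse_fill_dir_t callback and deduplicates entries by name. A hedged sketch of how a readdir implementation might drive it (the entry list is illustrative; the real call sites live elsewhere in s3fs):

    #include <string>
    #include <vector>
    #include "syncfiller.h"

    // buf and filler are the arguments FUSE passes to the readdir callback.
    static int fill_entries(void* buf, fuse_fill_dir_t filler, const std::vector<std::string>& names)
    {
        SyncFiller syncfiller(buf, filler);       // aborts if either argument is null

        syncfiller.Fill(".", nullptr, 0);         // duplicates are skipped automatically
        syncfiller.Fill("..", nullptr, 0);

        return syncfiller.SufficiencyFill(names); // non-zero if any filler call reported an error
    }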
/*

View File

@ -18,47 +18,44 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_CURL_HANDLERPOOL_H_
#define S3FS_CURL_HANDLERPOOL_H_
#ifndef SYNCFILLER_H_
#define SYNCFILLER_H_
#include <cassert>
#include <curl/curl.h>
#include <list>
#include <string>
#include <mutex>
#include <vector>
#include <set>
#include "s3fs.h"
//----------------------------------------------
// Typedefs
// class SyncFiller
//----------------------------------------------
typedef std::list<CURL*> hcurllist_t;
//----------------------------------------------
// class CurlHandlerPool
//----------------------------------------------
class CurlHandlerPool
//
// A class that synchronizes calls to the fuse_fill_dir_t
// function used to fill readdir results
//
class SyncFiller
{
public:
explicit CurlHandlerPool(int maxHandlers) : mMaxHandlers(maxHandlers)
{
assert(maxHandlers > 0);
}
CurlHandlerPool(const CurlHandlerPool&) = delete;
CurlHandlerPool(CurlHandlerPool&&) = delete;
CurlHandlerPool& operator=(const CurlHandlerPool&) = delete;
CurlHandlerPool& operator=(CurlHandlerPool&&) = delete;
bool Init();
bool Destroy();
CURL* GetHandler(bool only_pool);
void ReturnHandler(CURL* hCurl, bool restore_pool);
void ResetHandler(CURL* hCurl);
private:
int mMaxHandlers;
pthread_mutex_t mLock;
hcurllist_t mPool;
mutable std::mutex filler_lock;
void* filler_buff;
fuse_fill_dir_t filler_func;
std::set<std::string> filled;
public:
explicit SyncFiller(void* buff = nullptr, fuse_fill_dir_t filler = nullptr);
~SyncFiller() = default;
SyncFiller(const SyncFiller&) = delete;
SyncFiller(SyncFiller&&) = delete;
SyncFiller& operator=(const SyncFiller&) = delete;
SyncFiller& operator=(SyncFiller&&) = delete;
int Fill(const std::string& name, const struct stat *stbuf, off_t off);
int SufficiencyFill(const std::vector<std::string>& pathlist);
};
#endif // S3FS_CURL_HANDLERPOOL_H_
#endif // SYNCFILLER_H_
/*
* Local variables:

View File

@ -150,7 +150,7 @@ void test_slist_remove()
curl_slist_free_all(list);
}
int main(int argc, char *argv[])
int main(int argc, const char *argv[])
{
test_sort_insert();
test_slist_remove();

View File

@ -22,7 +22,8 @@
#include "fdcache_stat.h"
#include "test_util.h"
bool CacheFileStat::Open() { return false; }
bool CacheFileStat::Open() { return false; } // NOLINT(readability-convert-member-functions-to-static)
bool CacheFileStat::OverWriteFile(const std::string& strall) const { return false; } // NOLINT(readability-convert-member-functions-to-static)
void test_compress()
{
@ -72,7 +73,7 @@ void test_compress()
ASSERT_EQUALS(off_t(36), size);
}
int main(int argc, char *argv[])
int main(int argc, const char *argv[])
{
test_compress();
return 0;

View File

@ -27,6 +27,8 @@
#include "string_util.h"
#include "test_util.h"
using namespace std::string_literals;
//-------------------------------------------------------------------
// Global variables for test_string_util
//-------------------------------------------------------------------
@ -35,29 +37,29 @@ std::string instance_name;
void test_trim()
{
ASSERT_EQUALS(std::string("1234"), trim(" 1234 "));
ASSERT_EQUALS(std::string("1234"), trim("1234 "));
ASSERT_EQUALS(std::string("1234"), trim(" 1234"));
ASSERT_EQUALS(std::string("1234"), trim("1234"));
ASSERT_EQUALS("1234"s, trim(" 1234 "));
ASSERT_EQUALS("1234"s, trim("1234 "));
ASSERT_EQUALS("1234"s, trim(" 1234"));
ASSERT_EQUALS("1234"s, trim("1234"));
ASSERT_EQUALS(std::string("1234 "), trim_left(" 1234 "));
ASSERT_EQUALS(std::string("1234 "), trim_left("1234 "));
ASSERT_EQUALS(std::string("1234"), trim_left(" 1234"));
ASSERT_EQUALS(std::string("1234"), trim_left("1234"));
ASSERT_EQUALS("1234 "s, trim_left(" 1234 "));
ASSERT_EQUALS("1234 "s, trim_left("1234 "));
ASSERT_EQUALS("1234"s, trim_left(" 1234"));
ASSERT_EQUALS("1234"s, trim_left("1234"));
ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234 "));
ASSERT_EQUALS(std::string("1234"), trim_right("1234 "));
ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234"));
ASSERT_EQUALS(std::string("1234"), trim_right("1234"));
ASSERT_EQUALS(" 1234"s, trim_right(" 1234 "));
ASSERT_EQUALS("1234"s, trim_right("1234 "));
ASSERT_EQUALS(" 1234"s, trim_right(" 1234"));
ASSERT_EQUALS("1234"s, trim_right("1234"));
ASSERT_EQUALS(std::string("1234"), peeloff("\"1234\"")); // "1234" -> 1234
ASSERT_EQUALS(std::string("\"1234\""), peeloff("\"\"1234\"\"")); // ""1234"" -> "1234"
ASSERT_EQUALS(std::string("\"1234"), peeloff("\"\"1234\"")); // ""1234" -> "1234
ASSERT_EQUALS(std::string("1234\""), peeloff("\"1234\"\"")); // "1234"" -> 1234"
ASSERT_EQUALS(std::string("\"1234"), peeloff("\"1234")); // "1234 -> "1234
ASSERT_EQUALS(std::string("1234\""), peeloff("1234\"")); // 1234" -> 1234"
ASSERT_EQUALS(std::string(" \"1234\""), peeloff(" \"1234\"")); // _"1234" -> _"1234"
ASSERT_EQUALS(std::string("\"1234\" "), peeloff("\"1234\" ")); // "1234"_ -> "1234"_
ASSERT_EQUALS("1234"s, peeloff("\"1234\"")); // "1234" -> 1234
ASSERT_EQUALS("\"1234\""s, peeloff("\"\"1234\"\"")); // ""1234"" -> "1234"
ASSERT_EQUALS("\"1234"s, peeloff("\"\"1234\"")); // ""1234" -> "1234
ASSERT_EQUALS("1234\""s, peeloff("\"1234\"\"")); // "1234"" -> 1234"
ASSERT_EQUALS("\"1234"s, peeloff("\"1234")); // "1234 -> "1234
ASSERT_EQUALS("1234\""s, peeloff("1234\"")); // 1234" -> 1234"
ASSERT_EQUALS(" \"1234\""s, peeloff(" \"1234\"")); // _"1234" -> _"1234"
ASSERT_EQUALS("\"1234\" "s, peeloff("\"1234\" ")); // "1234"_ -> "1234"_
}
void test_base64()
@ -65,30 +67,30 @@ void test_base64()
std::string buf;
char tmpbuf = '\0';
ASSERT_EQUALS(s3fs_base64(nullptr, 0), std::string(""));
ASSERT_EQUALS(s3fs_base64(nullptr, 0), ""s);
buf = s3fs_decode64(nullptr, 0);
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), &tmpbuf, 0);
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>(""), 0), std::string(""));
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>(""), 0), ""s);
buf = s3fs_decode64("", 0);
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), &tmpbuf, 0);
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1"), 1), std::string("MQ=="));
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1"), 1), "MQ=="s);
buf = s3fs_decode64("MQ==", 4);
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), "1", 1);
ASSERT_EQUALS(buf.length(), static_cast<size_t>(1));
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("12"), 2), std::string("MTI="));
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("12"), 2), "MTI="s);
buf = s3fs_decode64("MTI=", 4);
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), "12", 2);
ASSERT_EQUALS(buf.length(), static_cast<size_t>(2));
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("123"), 3), std::string("MTIz"));
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("123"), 3), "MTIz"s);
buf = s3fs_decode64("MTIz", 4);
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), "123", 3);
ASSERT_EQUALS(buf.length(), static_cast<size_t>(3));
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1234"), 4), std::string("MTIzNA=="));
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1234"), 4), "MTIzNA=="s);
buf = s3fs_decode64("MTIzNA==", 8);
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), "1234", 4);
ASSERT_EQUALS(buf.length(), static_cast<size_t>(4));
@ -121,7 +123,7 @@ void test_strtoofft()
ASSERT_EQUALS(value, static_cast<off_t>(15L));
ASSERT_TRUE(s3fs_strtoofft(&value, "deadbeef", /*base=*/ 16));
ASSERT_EQUALS(value, static_cast<off_t>(3735928559L));
ASSERT_EQUALS(value, static_cast<off_t>(3'735'928'559L));
}
void test_wtf8_encoding()
@ -130,7 +132,7 @@ void test_wtf8_encoding()
std::string utf8("Hyld\xc3\xbdpi \xc3\xbej\xc3\xb3\xc3\xb0""f\xc3\xa9lagsins vex \xc3\xbar k\xc3\xa6rkomnu b\xc3\xb6li \xc3\xad \xc3\xa1st");
std::string cp1252("Hyld\xfdpi \xfej\xf3\xf0""f\xe9lagsins vex \xfar k\xe6rkomnu b\xf6li \xed \xe1st");
std::string broken = utf8;
broken[14] = 0x97;
broken[14] = '\x97';
std::string mixed = ascii + utf8 + cp1252;
ASSERT_EQUALS(s3fs_wtf8_encode(ascii), ascii);
@ -197,7 +199,7 @@ void test_cr_encoding()
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_mid_crper2.c_str()).c_str()), base_mid_crper2);
}
int main(int argc, char *argv[])
int main(int argc, const char *argv[])
{
S3fsLog singletonLog;

View File

@ -21,13 +21,14 @@
#ifndef S3FS_TEST_UTIL_H_
#define S3FS_TEST_UTIL_H_
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <stdio.h>
#include <string>
#include "string_util.h"
template <typename T> void assert_equals(const T &x, const T &y, const char *file, int line)
template <typename T> inline void assert_equals(const T &x, const T &y, const char *file, int line)
{
if (x != y) {
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
@ -36,7 +37,7 @@ template <typename T> void assert_equals(const T &x, const T &y, const char *fil
}
}
template <> void assert_equals(const std::string &x, const std::string &y, const char *file, int line)
template <> inline void assert_equals(const std::string &x, const std::string &y, const char *file, int line)
{
if (x != y) {
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
@ -47,7 +48,7 @@ template <> void assert_equals(const std::string &x, const std::string &y, const
}
template <typename T> void assert_nequals(const T &x, const T &y, const char *file, int line)
template <typename T> inline void assert_nequals(const T &x, const T &y, const char *file, int line)
{
if (x == y) {
std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
@ -55,7 +56,7 @@ template <typename T> void assert_nequals(const T &x, const T &y, const char *fi
}
}
template <> void assert_nequals(const std::string &x, const std::string &y, const char *file, int line)
template <> inline void assert_nequals(const std::string &x, const std::string &y, const char *file, int line)
{
if (x == y) {
std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
@ -65,7 +66,7 @@ template <> void assert_nequals(const std::string &x, const std::string &y, cons
}
}
void assert_strequals(const char *x, const char *y, const char *file, int line)
inline void assert_strequals(const char *x, const char *y, const char *file, int line)
{
if(x == nullptr && y == nullptr){
return;
@ -76,7 +77,7 @@ void assert_strequals(const char *x, const char *y, const char *file, int line)
}
}
void assert_bufequals(const char *x, size_t len1, const char *y, size_t len2, const char *file, int line)
inline void assert_bufequals(const char *x, size_t len1, const char *y, size_t len2, const char *file, int line)
{
if(x == nullptr && y == nullptr){
return;

View File

@ -19,18 +19,23 @@
*/
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <future>
#include <mutex>
#include <thread>
#include <utility>
#include "s3fs_logger.h"
#include "threadpoolman.h"
#include "autolock.h"
#include "curl.h"
#include "curl_share.h"
//------------------------------------------------
// ThreadPoolMan class variables
//------------------------------------------------
ThreadPoolMan* ThreadPoolMan::singleton = nullptr;
int ThreadPoolMan::worker_count = 10; // default
std::unique_ptr<ThreadPoolMan> ThreadPoolMan::singleton;
//------------------------------------------------
// ThreadPoolMan class methods
@ -38,109 +43,156 @@ ThreadPoolMan* ThreadPoolMan::singleton = nullptr;
bool ThreadPoolMan::Initialize(int count)
{
if(ThreadPoolMan::singleton){
S3FS_PRN_WARN("Already singleton for Thread Manager is existed, then re-create it.");
ThreadPoolMan::Destroy();
S3FS_PRN_CRIT("Already singleton for Thread Manager exists.");
abort();
}
ThreadPoolMan::singleton = new ThreadPoolMan(count);
if(-1 != count){
ThreadPoolMan::SetWorkerCount(count);
}
ThreadPoolMan::singleton = std::make_unique<ThreadPoolMan>(ThreadPoolMan::worker_count);
return true;
}
void ThreadPoolMan::Destroy()
{
if(ThreadPoolMan::singleton){
delete ThreadPoolMan::singleton;
ThreadPoolMan::singleton = nullptr;
}
ThreadPoolMan::singleton.reset();
}
bool ThreadPoolMan::Instruct(std::unique_ptr<thpoolman_param> pparam)
int ThreadPoolMan::SetWorkerCount(int count)
{
if(0 >= count){
S3FS_PRN_ERR("Thread worker count(%d) must be positive number.", count);
return -1;
}
if(count == ThreadPoolMan::worker_count){
return ThreadPoolMan::worker_count;
}
// [TODO]
// If we need to dynamically change worker threads, this is
// where we would terminate/add workers.
//
int old = ThreadPoolMan::worker_count;
ThreadPoolMan::worker_count = count;
return old;
}
bool ThreadPoolMan::Instruct(const thpoolman_param& param)
{
if(!ThreadPoolMan::singleton){
S3FS_PRN_WARN("The singleton object is not initialized yet.");
return false;
}
return ThreadPoolMan::singleton->SetInstruction(std::move(pparam));
if(!param.psem){
S3FS_PRN_ERR("Thread parameter Semaphore is null.");
return false;
}
ThreadPoolMan::singleton->SetInstruction(param);
return true;
}
bool ThreadPoolMan::AwaitInstruct(const thpoolman_param& param)
{
if(!ThreadPoolMan::singleton){
S3FS_PRN_WARN("The singleton object is not initialized yet.");
return false;
}
if(param.psem){
S3FS_PRN_ERR("Thread parameter Semaphore must be null.");
return false;
}
// Setup local thpoolman_param structure with local Semaphore
thpoolman_param local_param;
Semaphore await_sem(0);
local_param.args = param.args;
local_param.psem = &await_sem;
local_param.pfunc = param.pfunc;
// Set parameters and run thread worker
ThreadPoolMan::singleton->SetInstruction(local_param);
// wait until the thread is complete
await_sem.acquire();
return true;
}
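Instruct() queues a job asynchronously (the caller may supply a Semaphore in the parameter to be released on completion), while AwaitInstruct() builds a local Semaphore internally and blocks until a pool thread has run the job. A minimal hedged sketch of a caller, assuming the pool was already started with ThreadPoolMan::Initialize() (the worker function and its argument are illustrative):

    #include "threadpoolman.h"

    // Matches thpoolman_worker: each pool thread owns one S3fsCurl instance.
    static void* example_worker(S3fsCurl& s3fscurl, void* arg)
    {
        (void)s3fscurl;
        (void)arg;
        return nullptr;      // nullptr means success; a non-null value is logged as an error code
    }

    static bool run_job_and_wait(void* job_arg)
    {
        thpoolman_param param;
        param.args  = job_arg;
        param.psem  = nullptr;            // must stay null for AwaitInstruct()
        param.pfunc = example_worker;

        return ThreadPoolMan::AwaitInstruct(param);   // blocks until example_worker has returned
    }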
//
// Thread worker
//
void* ThreadPoolMan::Worker(void* arg)
void ThreadPoolMan::Worker(ThreadPoolMan* psingleton, std::promise<int> promise)
{
ThreadPoolMan* psingleton = static_cast<ThreadPoolMan*>(arg);
if(!psingleton){
S3FS_PRN_ERR("The parameter for worker thread is invalid.");
return reinterpret_cast<void*>(-EIO);
promise.set_value(-EIO);
return;
}
S3FS_PRN_INFO3("Start worker thread in ThreadPoolMan.");
// The only object in this thread worker
S3fsCurl s3fscurl(true);
while(!psingleton->IsExit()){
// wait
psingleton->thpoolman_sem.wait();
psingleton->thpoolman_sem.acquire();
if(psingleton->IsExit()){
break;
}
// get instruction
std::unique_ptr<thpoolman_param> pparam;
{
AutoLock auto_lock(&(psingleton->thread_list_lock));
// reset curl handle
if(!s3fscurl.CreateCurlHandle(true)){
S3FS_PRN_ERR("Failed to re-create curl handle.");
break;
}
if(!psingleton->instruction_list.empty()){
pparam = std::move(psingleton->instruction_list.front());
psingleton->instruction_list.pop_front();
if(!pparam){
S3FS_PRN_WARN("Got a semaphore, but the instruction is empty.");
}
// get instruction
thpoolman_param param;
{
const std::lock_guard<std::mutex> lock(psingleton->thread_list_lock);
if(psingleton->instruction_list.empty()){
S3FS_PRN_DBG("Got a semaphore, but the instruction is empty.");
continue;
}else{
S3FS_PRN_WARN("Got a semaphore, but there is no instruction.");
pparam = nullptr;
param = psingleton->instruction_list.front();
psingleton->instruction_list.pop_front();
}
}
if(pparam){
void* retval = pparam->pfunc(pparam->args);
if(nullptr != retval){
S3FS_PRN_WARN("The instruction function returned with somthign error code(%ld).", reinterpret_cast<long>(retval));
}
if(pparam->psem){
pparam->psem->post();
}
// run function
void* retval;
if(nullptr != (retval = param.pfunc(s3fscurl, param.args))){
S3FS_PRN_WARN("The instruction function returned with something error code(%ld).", reinterpret_cast<long>(retval));
}
if(param.psem){
param.psem->release();
}
}
return nullptr;
if(!S3fsCurlShare::DestroyCurlShareHandleForThread()){
S3FS_PRN_WARN("Failed to destroy curl share handle for this thread, but continue...");
}
promise.set_value(0);
}
//------------------------------------------------
// ThreadPoolMan methods
//------------------------------------------------
ThreadPoolMan::ThreadPoolMan(int count) : is_exit(false), thpoolman_sem(0), is_lock_init(false)
ThreadPoolMan::ThreadPoolMan(int count) : is_exit(false), thpoolman_sem(0)
{
if(count < 1){
S3FS_PRN_CRIT("Failed to creating singleton for Thread Manager, because thread count(%d) is under 1.", count);
abort();
}
if(ThreadPoolMan::singleton){
S3FS_PRN_CRIT("Already singleton for Thread Manager is existed.");
S3FS_PRN_CRIT("Already singleton for Thread Manager exists.");
abort();
}
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
#if S3FS_PTHREAD_ERRORCHECK
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
int result;
if(0 != (result = pthread_mutex_init(&thread_list_lock, &attr))){
S3FS_PRN_CRIT("failed to init thread_list_lock: %d", result);
abort();
}
is_lock_init = true;
// create threads
if(!StartThreads(count)){
S3FS_PRN_ERR("Failed starting threads at initializing.");
@ -151,15 +203,6 @@ ThreadPoolMan::ThreadPoolMan(int count) : is_exit(false), thpoolman_sem(0), is_l
ThreadPoolMan::~ThreadPoolMan()
{
StopThreads();
if(is_lock_init){
int result;
if(0 != (result = pthread_mutex_destroy(&thread_list_lock))){
S3FS_PRN_CRIT("failed to destroy thread_list_lock: %d", result);
abort();
}
is_lock_init = false;
}
}
bool ThreadPoolMan::IsExit() const
@ -174,6 +217,8 @@ void ThreadPoolMan::SetExitFlag(bool exit_flag)
bool ThreadPoolMan::StopThreads()
{
const std::lock_guard<std::mutex> lock(thread_list_lock);
if(thread_list.empty()){
S3FS_PRN_INFO("Any threads are running now, then nothing to do.");
return true;
@ -182,23 +227,19 @@ bool ThreadPoolMan::StopThreads()
// all threads to exit
SetExitFlag(true);
for(size_t waitcnt = thread_list.size(); 0 < waitcnt; --waitcnt){
thpoolman_sem.post();
thpoolman_sem.release();
}
// wait for threads exiting
for(thread_list_t::const_iterator iter = thread_list.begin(); iter != thread_list.end(); ++iter){
void* retval = nullptr;
int result = pthread_join(*iter, &retval);
if(result){
S3FS_PRN_ERR("failed pthread_join - result(%d)", result);
}else{
S3FS_PRN_DBG("succeed pthread_join - return code(%ld)", reinterpret_cast<long>(retval));
}
for(auto& pair : thread_list){
pair.first.join();
long retval = pair.second.get();
S3FS_PRN_DBG("join succeeded - return code(%ld)", reinterpret_cast<long>(retval));
}
thread_list.clear();
// reset semaphore(to zero)
while(thpoolman_sem.try_wait()){
while(thpoolman_sem.try_acquire()){
}
return true;
@ -223,35 +264,26 @@ bool ThreadPoolMan::StartThreads(int count)
SetExitFlag(false);
for(int cnt = 0; cnt < count; ++cnt){
// run thread
pthread_t thread;
int result;
if(0 != (result = pthread_create(&thread, nullptr, ThreadPoolMan::Worker, static_cast<void*>(this)))){
S3FS_PRN_ERR("failed pthread_create with return code(%d)", result);
StopThreads(); // if possible, stop all threads
return false;
}
thread_list.push_back(thread);
std::promise<int> promise;
std::future<int> future = promise.get_future();
std::thread thread(ThreadPoolMan::Worker, this, std::move(promise));
const std::lock_guard<std::mutex> lock(thread_list_lock);
thread_list.emplace_back(std::move(thread), std::move(future));
}
return true;
}
bool ThreadPoolMan::SetInstruction(std::unique_ptr<thpoolman_param> pparam)
void ThreadPoolMan::SetInstruction(const thpoolman_param& param)
{
if(!pparam){
S3FS_PRN_ERR("The parameter value is nullptr.");
return false;
}
// set parameter to list
{
AutoLock auto_lock(&thread_list_lock);
instruction_list.push_back(std::move(pparam));
const std::lock_guard<std::mutex> lock(thread_list_lock);
instruction_list.push_back(param);
}
// run thread
thpoolman_sem.post();
return true;
thpoolman_sem.release();
}
/*

View File

@ -22,19 +22,23 @@
#define S3FS_THREADPOOLMAN_H_
#include <atomic>
#include <future>
#include <list>
#include <memory>
#include <mutex>
#include <vector>
#include "common.h"
#include "psemaphore.h"
//------------------------------------------------
// Typedefs for functions and structures
//------------------------------------------------
class S3fsCurl;
//
// Prototype function
//
typedef void* (*thpoolman_worker)(void*); // same as start_routine for pthread_create function
typedef void* (*thpoolman_worker)(S3fsCurl&, void*);
//
// Parameter structure
@ -46,16 +50,12 @@ typedef void* (*thpoolman_worker)(void*); // same as start_routine
//
struct thpoolman_param
{
void* args;
Semaphore* psem;
thpoolman_worker pfunc;
thpoolman_param() : args(nullptr), psem(nullptr), pfunc(nullptr) {}
void* args = nullptr;
Semaphore* psem = nullptr;
thpoolman_worker pfunc = nullptr;
};
typedef std::list<std::unique_ptr<thpoolman_param>> thpoolman_params_t;
typedef std::vector<pthread_t> thread_list_t;
typedef std::list<thpoolman_param> thpoolman_params_t;
//------------------------------------------------
// Class ThreadPoolMan
@ -63,20 +63,27 @@ typedef std::vector<pthread_t> thread_list_t;
class ThreadPoolMan
{
private:
static ThreadPoolMan* singleton;
static int worker_count;
static std::unique_ptr<ThreadPoolMan> singleton;
std::atomic<bool> is_exit;
Semaphore thpoolman_sem;
bool is_lock_init;
pthread_mutex_t thread_list_lock;
thread_list_t thread_list;
thpoolman_params_t instruction_list;
std::mutex thread_list_lock;
std::vector<std::pair<std::thread, std::future<int>>> thread_list GUARDED_BY(thread_list_lock);
thpoolman_params_t instruction_list GUARDED_BY(thread_list_lock);
private:
static void* Worker(void* arg);
static void Worker(ThreadPoolMan* psingleton, std::promise<int> promise);
bool IsExit() const;
void SetExitFlag(bool exit_flag);
bool StopThreads();
bool StartThreads(int count);
void SetInstruction(const thpoolman_param& pparam);
public:
explicit ThreadPoolMan(int count = 1);
~ThreadPoolMan();
ThreadPoolMan(const ThreadPoolMan&) = delete;
@ -84,17 +91,12 @@ class ThreadPoolMan
ThreadPoolMan& operator=(const ThreadPoolMan&) = delete;
ThreadPoolMan& operator=(ThreadPoolMan&&) = delete;
bool IsExit() const;
void SetExitFlag(bool exit_flag);
bool StopThreads();
bool StartThreads(int count);
bool SetInstruction(std::unique_ptr<thpoolman_param> pparam);
public:
static bool Initialize(int count);
static bool Initialize(int count = -1);
static void Destroy();
static bool Instruct(std::unique_ptr<thpoolman_param> pparam);
static int SetWorkerCount(int count);
static int GetWorkerCount() { return ThreadPoolMan::worker_count; }
static bool Instruct(const thpoolman_param& pparam);
static bool AwaitInstruct(const thpoolman_param& param);
};
#endif // S3FS_THREADPOOLMAN_H_

View File

@ -22,10 +22,12 @@
#define S3FS_TYPES_H_
#include <cstdlib>
#include <cstdint>
#include <cstring>
#include <string>
#include <map>
#include <list>
#include <utility>
#include <vector>
//
@ -53,7 +55,7 @@ typedef std::map<std::string, std::string> xattrs_t;
//-------------------------------------------------------------------
// acl_t
//-------------------------------------------------------------------
enum class acl_t{
enum class acl_t : uint8_t {
PRIVATE,
PUBLIC_READ,
PUBLIC_READ_WRITE,
@ -65,7 +67,7 @@ enum class acl_t{
UNKNOWN
};
inline const char* str(acl_t value)
constexpr const char* str(acl_t value)
{
switch(value){
case acl_t::PRIVATE:
@ -116,14 +118,14 @@ inline acl_t to_acl(const char *acl)
//-------------------------------------------------------------------
// sse_type_t
//-------------------------------------------------------------------
enum class sse_type_t{
enum class sse_type_t : uint8_t {
SSE_DISABLE = 0, // not use server side encrypting
SSE_S3, // server side encrypting by S3 key
SSE_C, // server side encrypting by custom key
SSE_KMS // server side encrypting by kms id
};
enum class signature_type_t {
enum class signature_type_t : uint8_t {
V2_ONLY,
V4_ONLY,
V2_OR_V4
@ -149,7 +151,7 @@ struct etagpair
void clear()
{
etag.erase();
etag.clear();
part_num = -1;
}
};
@ -184,7 +186,7 @@ struct petagpool
//
struct filepart
{
bool uploaded; // does finish uploading
bool uploaded = false; // does finish uploading
std::string etag; // expected etag value
int fd; // base file(temporary full file) descriptor
off_t startpos; // seek fd point for uploading
@ -192,7 +194,7 @@ struct filepart
bool is_copy; // whether is copy multipart
etagpair* petag; // use only parallel upload
explicit filepart(bool is_uploaded = false, int _fd = -1, off_t part_start = 0, off_t part_size = -1, bool is_copy_part = false, etagpair* petagpair = nullptr) : uploaded(false), fd(_fd), startpos(part_start), size(part_size), is_copy(is_copy_part), petag(petagpair) {}
explicit filepart(bool is_uploaded = false, int _fd = -1, off_t part_start = 0, off_t part_size = -1, bool is_copy_part = false, etagpair* petagpair = nullptr) : fd(_fd), startpos(part_start), size(part_size), is_copy(is_copy_part), petag(petagpair) {}
~filepart()
{
@ -215,7 +217,7 @@ struct filepart
if(-1 == partnum){
partnum = static_cast<int>(list.size()) + 1;
}
list.push_back(etagpair(nullptr, partnum));
list.emplace_back(nullptr, partnum);
petag = &list.back();
}
@ -267,7 +269,7 @@ struct untreatedpart
// Check if the areas overlap
// However, even if the areas do not overlap, this method returns true if areas are adjacent.
//
bool check_overlap(off_t chk_start, off_t chk_size)
bool check_overlap(off_t chk_start, off_t chk_size) const
{
if(chk_start < 0 || chk_size <= 0 || start < 0 || size <= 0 || (chk_start + chk_size) < start || (start + size) < chk_start){
return false;
@ -310,7 +312,7 @@ typedef std::vector<struct mp_part> mp_part_list_t;
inline off_t total_mp_part_list(const mp_part_list_t& mplist)
{
off_t size = 0;
for(mp_part_list_t::const_iterator iter = mplist.begin(); iter != mplist.end(); ++iter){
for(auto iter = mplist.cbegin(); iter != mplist.cend(); ++iter){
size += iter->size;
}
return size;
@ -344,6 +346,109 @@ struct case_insensitive_compare_func
};
typedef std::map<std::string, std::string, case_insensitive_compare_func> mimes_t;
//-------------------------------------------------------------------
// S3 Object Type Enum : objtype_t
//-------------------------------------------------------------------
// The type defines what files, symlinks, and directories can be
// represented in S3.
// The stats cache has a negative cache, which also defines its type.
// Directory objects can have multiple types depending on the client
// that created them.
// To accommodate these, this enumeration also defines the type of the
// original object.
//
enum class objtype_t : int8_t {
UNKNOWN = -1,
FILE = 0,
SYMLINK = 1,
DIR_NORMAL = 2,
DIR_NOT_TERMINATE_SLASH = 3,
DIR_FOLDER_SUFFIX = 4,
DIR_NOT_EXIST_OBJECT = 5,
NEGATIVE = 6 // Negative type means an object does not exist in Stats cache.
};
constexpr bool IS_FILE_OBJ(objtype_t type)
{
return (objtype_t::FILE == type);
}
constexpr bool IS_SYMLINK_OBJ(objtype_t type)
{
return (objtype_t::SYMLINK == type);
}
constexpr bool IS_NORMALDIR_OBJ(objtype_t type)
{
return (objtype_t::DIR_NORMAL == type);
}
constexpr bool IS_DIR_OBJ(objtype_t type)
{
return (objtype_t::DIR_NORMAL == type || objtype_t::DIR_NOT_TERMINATE_SLASH == type || objtype_t::DIR_FOLDER_SUFFIX == type || objtype_t::DIR_NOT_EXIST_OBJECT == type);
}
constexpr bool IS_NEGATIVE_OBJ(objtype_t type)
{
return (objtype_t::NEGATIVE == type);
}
constexpr bool IS_SAME_OBJ(objtype_t type1, objtype_t type2)
{
if(type1 == type2){
return true;
}
if(IS_DIR_OBJ(type1) && IS_DIR_OBJ(type2)){
return true;
}
return false;
}
constexpr bool NEED_REPLACEDIR_OBJ(objtype_t type)
{
return (objtype_t::DIR_NOT_TERMINATE_SLASH == type || objtype_t::DIR_FOLDER_SUFFIX == type || objtype_t::DIR_NOT_EXIST_OBJECT == type);
}
constexpr bool NEED_RMDIR_OBJ(objtype_t type)
{
return (objtype_t::DIR_NOT_TERMINATE_SLASH == type || objtype_t::DIR_FOLDER_SUFFIX == type);
}
inline std::string STR_OBJTYPE(objtype_t type)
{
std::string strType;
switch(type){
case objtype_t::UNKNOWN:
strType = "UNKNOWN(" + std::to_string(static_cast<int>(type)) + ")";
break;
case objtype_t::FILE:
strType = "FILE(" + std::to_string(static_cast<int>(type)) + ")";
break;
case objtype_t::SYMLINK:
strType = "SYMLINK(" + std::to_string(static_cast<int>(type)) + ")";
break;
case objtype_t::DIR_NORMAL:
strType = "DIR_NORMAL(" + std::to_string(static_cast<int>(type)) + ")";
break;
case objtype_t::DIR_NOT_TERMINATE_SLASH:
strType = "DIR_NOT_TERMINATE_SLASH(" + std::to_string(static_cast<int>(type)) + ")";
break;
case objtype_t::DIR_FOLDER_SUFFIX:
strType = "DIR_FOLDER_SUFFIX(" + std::to_string(static_cast<int>(type)) + ")";
break;
case objtype_t::DIR_NOT_EXIST_OBJECT:
strType = "DIR_NOT_EXIST_OBJECT(" + std::to_string(static_cast<int>(type)) + ")";
break;
case objtype_t::NEGATIVE:
strType = "NEGATIVE(" + std::to_string(static_cast<int>(type)) + ")";
break;
default:
strType = "not defined value(" + std::to_string(static_cast<int>(type)) + ")";
break;
}
return strType;
}
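The constexpr predicates above classify objtype_t values, so their relationships can be verified at compile time. A short illustrative check (the static_asserts are not part of the diff):

    #include "types.h"

    // Every concrete directory flavor counts as a directory, and the flavors that
    // did not come from a plain "dir/" object are the ones that need replacing.
    static_assert(IS_DIR_OBJ(objtype_t::DIR_NORMAL),                               "DIR_NORMAL is a directory");
    static_assert(IS_DIR_OBJ(objtype_t::DIR_FOLDER_SUFFIX),                        "_$folder$ objects are directories");
    static_assert(!IS_DIR_OBJ(objtype_t::FILE),                                    "FILE is not a directory");
    static_assert(IS_SAME_OBJ(objtype_t::DIR_NORMAL, objtype_t::DIR_FOLDER_SUFFIX), "all directory flavors compare equal");
    static_assert(NEED_REPLACEDIR_OBJ(objtype_t::DIR_NOT_TERMINATE_SLASH),         "needs replacing with a normal dir");
    static_assert(!NEED_RMDIR_OBJ(objtype_t::DIR_NOT_EXIST_OBJECT),                "nothing to remove if no object exists");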
//-------------------------------------------------------------------
// Typedefs specialized for use
//-------------------------------------------------------------------

View File

@ -43,7 +43,7 @@ truncate_read_file_SOURCES = truncate_read_file.cc
cr_filename_SOURCES = cr_filename.cc
clang-tidy:
clang-tidy \
clang-tidy -extra-arg=-std=@CPP_VERSION@ \
$(junk_data_SOURCES) \
$(write_multiblock_SOURCES) \
$(mknod_test_SOURCES) \

View File

@ -34,7 +34,7 @@
int main(int argc, const char *argv[])
{
if(argc != 2){
fprintf(stderr, "[ERROR] Wrong paraemters\n");
fprintf(stderr, "[ERROR] Wrong parameters\n");
fprintf(stdout, "[Usage] cr_filename <base file path>\n");
exit(EXIT_FAILURE);
}

View File

@ -36,9 +36,9 @@ SUITELOG="${TOPDIR}/test-suite.log"
TMP_LINENO_FILE="/tmp/.lineno.tmp"
while [ $# -ne 0 ]; do
if [ "X$1" = "X" ]; then
if [ "$1" = "" ]; then
break
elif [ "X$1" = "X-h" ] || [ "X$1" = "X-H" ] || [ "X$1" = "X--help" ] || [ "X$1" = "X--HELP" ]; then
elif [ "$1" = "-h" ] || [ "$1" = "-H" ] || [ "$1" = "--help" ] || [ "$1" = "--HELP" ]; then
func_usage "${PRGNAME}"
exit 0
else
@ -117,7 +117,7 @@ done < "${TMP_LINENO_FILE}"
#
# Print rest lines
#
file_line_cnt=$(wc -l "${SUITELOG}" | awk '{print $1}')
file_line_cnt=$(wc -l < "${SUITELOG}")
tail_line_cnt=$((file_line_cnt - prev_line_number))
if [ "${prev_line_type}" -eq 1 ]; then

View File

@ -84,16 +84,21 @@ fi
export TEST_BUCKET_1
export S3_URL
export S3_ENDPOINT
export S3PROXY_CACERT_FILE
TEST_SCRIPT_DIR=$(pwd)
export TEST_SCRIPT_DIR
export TEST_BUCKET_MOUNT_POINT_1=${TEST_BUCKET_1}
S3PROXY_VERSION="2.0.0"
S3PROXY_VERSION="2.7.0"
S3PROXY_HASH="1a13c27f78902b57db871a2e638f520f439811b1c98b2208ff71ba64b61c4f3f"
S3PROXY_BINARY="${S3PROXY_BINARY-"s3proxy-${S3PROXY_VERSION}"}"
CHAOS_HTTP_PROXY_VERSION="1.1.0"
CHAOS_HTTP_PROXY_HASH="9ad1b9ac6569e99b2db3e7edfdd78fae0ea5c83069beccdf6bceebc848add2e7"
CHAOS_HTTP_PROXY_BINARY="chaos-http-proxy-${CHAOS_HTTP_PROXY_VERSION}"
PJDFSTEST_HASH="c711b5f6b666579846afba399a998f74f60c488b"
if [ ! -f "$S3FS_CREDENTIALS_FILE" ]
then
echo "Missing credentials file: ${S3FS_CREDENTIALS_FILE}"
@ -113,6 +118,16 @@ if [ ! -d "${TEST_BUCKET_MOUNT_POINT_1}" ]; then
mkdir -p "${TEST_BUCKET_MOUNT_POINT_1}"
fi
# [NOTE]
# For the Github Actions macos-14 Runner,
# Set variables for when stdbuf is used and when it is not.
#
if [ -n "${STDBUF_BIN}" ]; then
STDBUF_COMMAND_LINE=("${STDBUF_BIN}" -oL -eL)
else
STDBUF_COMMAND_LINE=()
fi
# This function execute the function parameters $1 times
# before giving up, with 1 second delays.
function retry {
@ -156,7 +171,9 @@ function start_s3proxy {
then
if [ ! -e "${S3PROXY_BINARY}" ]; then
curl "https://github.com/gaul/s3proxy/releases/download/s3proxy-${S3PROXY_VERSION}/s3proxy" \
--fail --location --silent --output "${S3PROXY_BINARY}"
--fail --location --silent --output "/tmp/${S3PROXY_BINARY}"
echo "$S3PROXY_HASH" "/tmp/${S3PROXY_BINARY}" | sha256sum --check
mv "/tmp/${S3PROXY_BINARY}" "${S3PROXY_BINARY}"
chmod +x "${S3PROXY_BINARY}"
fi
@ -174,7 +191,7 @@ function start_s3proxy {
S3PROXY_CACERT_FILE=""
fi
"${STDBUF_BIN}" -oL -eL java -jar "${S3PROXY_BINARY}" --properties "${S3PROXY_CONFIG}" &
"${STDBUF_COMMAND_LINE[@]}" java -jar "${S3PROXY_BINARY}" --properties "${S3PROXY_CONFIG}" &
S3PROXY_PID=$!
# wait for S3Proxy to start
@ -184,16 +201,30 @@ function start_s3proxy {
if [ -n "${CHAOS_HTTP_PROXY}" ] || [ -n "${CHAOS_HTTP_PROXY_OPT}" ]; then
if [ ! -e "${CHAOS_HTTP_PROXY_BINARY}" ]; then
curl "https://github.com/bouncestorage/chaos-http-proxy/releases/download/chaos-http-proxy-${CHAOS_HTTP_PROXY_VERSION}/chaos-http-proxy" \
--fail --location --silent --output "${CHAOS_HTTP_PROXY_BINARY}"
--fail --location --silent --output "/tmp/${CHAOS_HTTP_PROXY_BINARY}"
echo "$CHAOS_HTTP_PROXY_HASH" "/tmp/${CHAOS_HTTP_PROXY_BINARY}" | sha256sum --check
mv "/tmp/${CHAOS_HTTP_PROXY_BINARY}" "${CHAOS_HTTP_PROXY_BINARY}"
chmod +x "${CHAOS_HTTP_PROXY_BINARY}"
fi
"${STDBUF_BIN}" -oL -eL java -jar "${CHAOS_HTTP_PROXY_BINARY}" --properties chaos-http-proxy.conf &
"${STDBUF_COMMAND_LINE[@]}" java -jar "${CHAOS_HTTP_PROXY_BINARY}" --properties chaos-http-proxy.conf &
CHAOS_HTTP_PROXY_PID=$!
# wait for Chaos HTTP Proxy to start
wait_for_port 1080
fi
if [ ! -d "pjd-pjdfstest-${PJDFSTEST_HASH:0:7}" ]; then
curl "https://api.github.com/repos/pjd/pjdfstest/tarball/${PJDFSTEST_HASH}" \
--fail --location --silent --output /tmp/pjdfstest.tar.gz
tar zxf /tmp/pjdfstest.tar.gz
rm -f /tmp/pjdfstest.tar.gz
rm -f pjdfstest
ln -s "pjd-pjdfstest-${PJDFSTEST_HASH:0:7}" pjdfstest
(cd pjdfstest && autoreconf -ifs && ./configure && make)
fi
}
function stop_s3proxy {
@ -296,14 +327,14 @@ function start_s3fs {
(
set -x
CURL_CA_BUNDLE="${S3PROXY_CACERT_FILE}" \
${STDBUF_BIN} -oL -eL \
"${STDBUF_COMMAND_LINE[@]}" \
${VALGRIND_EXEC} \
${S3FS} \
${TEST_BUCKET_1} \
${TEST_BUCKET_MOUNT_POINT_1} \
-o use_path_request_style \
-o url="${S3_URL}" \
-o endpoint="${S3_ENDPOINT}" \
-o region="${S3_ENDPOINT}" \
-o use_xattr=1 \
-o enable_unsigned_payload \
${AUTH_OPT} \
@ -319,7 +350,7 @@ function start_s3fs {
-f \
"${@}" &
echo $! >&3
) 3>pid | "${STDBUF_BIN}" -oL -eL "${SED_BIN}" "${SED_BUFFER_FLAG}" "s/^/s3fs: /" &
) 3>pid | "${STDBUF_COMMAND_LINE[@]}" "${SED_BIN}" "${SED_BUFFER_FLAG}" "s/^/s3fs: /" &
sleep 1
S3FS_PID=$(<pid)
export S3FS_PID


@ -33,7 +33,7 @@ function test_create_empty_file {
check_file_size "${TEST_TEXT_FILE}" 0
aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "${OBJECT_NAME}"
s3_head "${TEST_BUCKET_1}/${OBJECT_NAME}"
rm_test_file
}
@ -98,7 +98,7 @@ function test_truncate_shrink_file {
local BIG_TRUNCATE_TEST_FILE="big-truncate-test.bin"
local t_size=$((1024 * 1024 * 32 + 64))
dd if=/dev/urandom of="${TEMP_DIR}/${BIG_TRUNCATE_TEST_FILE}" bs=1024 count=$((1024 * 64))
../../junk_data $((64 * 1024 * 1024)) > "${TEMP_DIR}/${BIG_TRUNCATE_TEST_FILE}"
cp "${TEMP_DIR}/${BIG_TRUNCATE_TEST_FILE}" "${BIG_TRUNCATE_TEST_FILE}"
"${TRUNCATE_BIN}" "${TEMP_DIR}/${BIG_TRUNCATE_TEST_FILE}" -s "${t_size}"
@ -122,7 +122,7 @@ function test_truncate_shrink_read_file {
# create file
dd if=/dev/urandom of="${TEST_TEXT_FILE}" bs="${init_size}" count=1
# truncate(shrink) file and read it before flusing
# truncate(shrink) file and read it before flushing
../../truncate_read_file "${TEST_TEXT_FILE}" "${shrink_size}"
# check file size
@ -152,7 +152,7 @@ function test_mv_file {
mk_test_file
# save file length
local ALT_TEXT_LENGTH; ALT_TEXT_LENGTH=$(wc -c "${TEST_TEXT_FILE}" | awk '{print $1}')
local ALT_TEXT_LENGTH; ALT_TEXT_LENGTH=$(wc -c < "${TEST_TEXT_FILE}")
#rename the test file
mv "${TEST_TEXT_FILE}" "${ALT_TEST_TEXT_FILE}"
@ -169,7 +169,7 @@ function test_mv_file {
fi
# Check the contents of the alt file
local ALT_FILE_LENGTH; ALT_FILE_LENGTH=$(wc -c "${ALT_TEST_TEXT_FILE}" | awk '{print $1}')
local ALT_FILE_LENGTH; ALT_FILE_LENGTH=$(wc -c < "${ALT_TEST_TEXT_FILE}")
if [ "$ALT_FILE_LENGTH" -ne "$ALT_TEXT_LENGTH" ]
then
echo "moved file length is not as expected expected: $ALT_TEXT_LENGTH got: $ALT_FILE_LENGTH"
@ -381,7 +381,7 @@ function test_remove_nonempty_directory {
function test_external_directory_creation {
describe "Test external directory creation ..."
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/directory/"${TEST_TEXT_FILE}"
echo "data" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "data" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}"
# shellcheck disable=SC2010
ls | grep -q directory
stat directory >/dev/null 2>&1
@ -403,10 +403,17 @@ function test_external_modification {
# cache will be read out.
# Therefore, we need to wait over 1 second here.
#
# In particular, on macOS, getattr may be called after a file is
# uploaded (released).
# This extends the expiration of the target file's stat cache entry
# (1 sec, set by the stat_cache_interval_expire option).
# Therefore, on macOS, an additional 1 sec wait is needed.
#
sleep 1
wait_ostype 1 "Darwin"
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo "new new" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "new new" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}"
cmp "${TEST_TEXT_FILE}" <(echo "new new")
rm -f "${TEST_TEXT_FILE}"
@ -425,7 +432,7 @@ function test_external_creation {
# If noobj_cache is enabled, we cannot be sure that it is registered in that cache.
# That's because an error will occur if the upload by aws cli takes more than 1 second.
#
echo "data" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "data" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}"
wait_ostype 1
@ -437,7 +444,7 @@ function test_external_creation {
function test_read_external_object() {
describe "create objects via aws CLI and read via s3fs ..."
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo "test" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "test" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}"
cmp "${TEST_TEXT_FILE}" <(echo "test")
rm -f "${TEST_TEXT_FILE}"
}
@ -448,7 +455,7 @@ function test_read_external_dir_object() {
local SUB_DIR_TEST_FILE; SUB_DIR_TEST_FILE="${SUB_DIR_NAME}/${TEST_TEXT_FILE}"
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${SUB_DIR_TEST_FILE}"
echo "test" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "test" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}"
if stat "${SUB_DIR_NAME}" | grep -q '1969-12-31[[:space:]]23:59:59[.]000000000'; then
echo "sub directory a/c/m time is underflow(-1)."
@ -463,12 +470,12 @@ function test_update_metadata_external_small_object() {
# [NOTE]
# Use the only filename in the test to avoid being affected by noobjcache.
#
local TEST_FILE_EXT; TEST_FILE_EXT=$(make_random_string)
local TEST_FILE_EXT; TEST_FILE_EXT=$(mktemp "XXXXXXXXXX")
local TEST_CHMOD_FILE="${TEST_TEXT_FILE}_chmod.${TEST_FILE_EXT}"
local TEST_CHOWN_FILE="${TEST_TEXT_FILE}_chown.${TEST_FILE_EXT}"
local TEST_UTIMENS_FILE="${TEST_TEXT_FILE}_utimens.${TEST_FILE_EXT}"
local TEST_SETXATTR_FILE="${TEST_TEXT_FILE}_xattr.${TEST_FILE_EXT}"
local TEST_RMXATTR_FILE="${TEST_TEXT_FILE}_xattr.${TEST_FILE_EXT}"
local TEST_SETXATTR_FILE="${TEST_TEXT_FILE}_set_xattr.${TEST_FILE_EXT}"
local TEST_RMXATTR_FILE="${TEST_TEXT_FILE}_rm_xattr.${TEST_FILE_EXT}"
local TEST_INPUT="TEST_STRING_IN_SMALL_FILE"
@ -476,7 +483,7 @@ function test_update_metadata_external_small_object() {
# chmod
#
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_CHMOD_FILE}"
echo "${TEST_INPUT}" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "${TEST_INPUT}" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}"
chmod +x "${TEST_CHMOD_FILE}"
cmp "${TEST_CHMOD_FILE}" <(echo "${TEST_INPUT}")
@ -484,7 +491,7 @@ function test_update_metadata_external_small_object() {
# chown
#
OBJECT_NAME=$(basename "${PWD}")/"${TEST_CHOWN_FILE}"
echo "${TEST_INPUT}" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "${TEST_INPUT}" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}"
chown "${UID}" "${TEST_CHOWN_FILE}"
cmp "${TEST_CHOWN_FILE}" <(echo "${TEST_INPUT}")
@ -492,7 +499,7 @@ function test_update_metadata_external_small_object() {
# utimens
#
OBJECT_NAME=$(basename "${PWD}")/"${TEST_UTIMENS_FILE}"
echo "${TEST_INPUT}" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "${TEST_INPUT}" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}"
touch "${TEST_UTIMENS_FILE}"
cmp "${TEST_UTIMENS_FILE}" <(echo "${TEST_INPUT}")
@ -500,19 +507,40 @@ function test_update_metadata_external_small_object() {
# set xattr
#
OBJECT_NAME=$(basename "${PWD}")/"${TEST_SETXATTR_FILE}"
echo "${TEST_INPUT}" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "${TEST_INPUT}" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}"
set_xattr key value "${TEST_SETXATTR_FILE}"
cmp "${TEST_SETXATTR_FILE}" <(echo "${TEST_INPUT}")
XATTR_VALUE=$(get_xattr key "${TEST_SETXATTR_FILE}")
if [ -z "${XATTR_VALUE}" ] || [ "${XATTR_VALUE}" != "value" ]; then
echo "could not read xattr(key) value."
return 1
fi
#
# remove xattr
#
# "%7B%22key%22%3A%22dmFsdWU%3D%22%7D" = {"key":"value"}
#
OBJECT_NAME=$(basename "${PWD}")/"${TEST_RMXATTR_FILE}"
echo "${TEST_INPUT}" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}" --metadata xattr=%7B%22key%22%3A%22dmFsdWU%3D%22%7D
del_xattr key "${TEST_RMXATTR_FILE}"
cmp "${TEST_RMXATTR_FILE}" <(echo "${TEST_INPUT}")
# [FIXME]
# On macos, the xattr value specified with the "--metadata" option cannot
# be set on the object. We confirmed this with the following versions:
# aws-cli/2.27.35 Python/3.13.3 Darwin/22.6.0 exe/x86_64
# We also tried "aws s3api put-object", but the result was the same.
#
# Since xattrs cannot be set on objects uploaded with the aws command, this
# check is skipped on macos.
# If a solution is found in the future, we will test it on macos as well.
#
if ! uname | grep -q Darwin; then
OBJECT_NAME=$(basename "${PWD}")/"${TEST_RMXATTR_FILE}"
echo "${TEST_INPUT}" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-xattr: %7B%22key%22%3A%22dmFsdWU%3D%22%7D"
del_xattr key "${TEST_RMXATTR_FILE}"
cmp "${TEST_RMXATTR_FILE}" <(echo "${TEST_INPUT}")
if find_xattr key "${TEST_RMXATTR_FILE}"; then
echo "could read xattr(key) value after removing it."
return 1
fi
fi
rm -f "${TEST_CHMOD_FILE}"
rm -f "${TEST_CHOWN_FILE}"
@ -527,12 +555,12 @@ function test_update_metadata_external_large_object() {
# [NOTE]
# Use the only filename in the test to avoid being affected by noobjcache.
#
local TEST_FILE_EXT; TEST_FILE_EXT=$(make_random_string)
local TEST_FILE_EXT; TEST_FILE_EXT=$(mktemp "XXXXXXXXXX")
local TEST_CHMOD_FILE="${TEST_TEXT_FILE}_chmod.${TEST_FILE_EXT}"
local TEST_CHOWN_FILE="${TEST_TEXT_FILE}_chown.${TEST_FILE_EXT}"
local TEST_UTIMENS_FILE="${TEST_TEXT_FILE}_utimens.${TEST_FILE_EXT}"
local TEST_SETXATTR_FILE="${TEST_TEXT_FILE}_xattr.${TEST_FILE_EXT}"
local TEST_RMXATTR_FILE="${TEST_TEXT_FILE}_xattr.${TEST_FILE_EXT}"
local TEST_SETXATTR_FILE="${TEST_TEXT_FILE}_set_xattr.${TEST_FILE_EXT}"
local TEST_RMXATTR_FILE="${TEST_TEXT_FILE}_rm_xattr.${TEST_FILE_EXT}"
../../junk_data $((BIG_FILE_BLOCK_SIZE * BIG_FILE_COUNT)) > "${TEMP_DIR}/${BIG_FILE}"
@ -540,7 +568,7 @@ function test_update_metadata_external_large_object() {
# chmod
#
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_CHMOD_FILE}"
aws_cli s3 cp "${TEMP_DIR}/${BIG_FILE}" "s3://${TEST_BUCKET_1}/${OBJECT_NAME}" --no-progress
s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" < "${TEMP_DIR}/${BIG_FILE}"
chmod +x "${TEST_CHMOD_FILE}"
cmp "${TEST_CHMOD_FILE}" "${TEMP_DIR}/${BIG_FILE}"
@ -548,7 +576,7 @@ function test_update_metadata_external_large_object() {
# chown
#
OBJECT_NAME=$(basename "${PWD}")/"${TEST_CHOWN_FILE}"
aws_cli s3 cp "${TEMP_DIR}/${BIG_FILE}" "s3://${TEST_BUCKET_1}/${OBJECT_NAME}" --no-progress
s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" < "${TEMP_DIR}/${BIG_FILE}"
chown "${UID}" "${TEST_CHOWN_FILE}"
cmp "${TEST_CHOWN_FILE}" "${TEMP_DIR}/${BIG_FILE}"
@ -556,7 +584,7 @@ function test_update_metadata_external_large_object() {
# utimens
#
OBJECT_NAME=$(basename "${PWD}")/"${TEST_UTIMENS_FILE}"
aws_cli s3 cp "${TEMP_DIR}/${BIG_FILE}" "s3://${TEST_BUCKET_1}/${OBJECT_NAME}" --no-progress
s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" < "${TEMP_DIR}/${BIG_FILE}"
touch "${TEST_UTIMENS_FILE}"
cmp "${TEST_UTIMENS_FILE}" "${TEMP_DIR}/${BIG_FILE}"
@ -564,19 +592,40 @@ function test_update_metadata_external_large_object() {
# set xattr
#
OBJECT_NAME=$(basename "${PWD}")/"${TEST_SETXATTR_FILE}"
aws_cli s3 cp "${TEMP_DIR}/${BIG_FILE}" "s3://${TEST_BUCKET_1}/${OBJECT_NAME}" --no-progress
s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" < "${TEMP_DIR}/${BIG_FILE}"
set_xattr key value "${TEST_SETXATTR_FILE}"
cmp "${TEST_SETXATTR_FILE}" "${TEMP_DIR}/${BIG_FILE}"
XATTR_VALUE=$(get_xattr key "${TEST_SETXATTR_FILE}")
if [ -z "${XATTR_VALUE}" ] || [ "${XATTR_VALUE}" != "value" ]; then
echo "could not read xattr(key) value."
return 1
fi
#
# remove xattr
#
# "%7B%22key%22%3A%22dmFsdWU%3D%22%7D" = {"key":"value"}
#
OBJECT_NAME=$(basename "${PWD}")/"${TEST_RMXATTR_FILE}"
aws_cli s3 cp "${TEMP_DIR}/${BIG_FILE}" "s3://${TEST_BUCKET_1}/${OBJECT_NAME}" --no-progress --metadata xattr=%7B%22key%22%3A%22dmFsdWU%3D%22%7D
del_xattr key "${TEST_RMXATTR_FILE}"
cmp "${TEST_RMXATTR_FILE}" "${TEMP_DIR}/${BIG_FILE}"
# [FIXME]
# On macos, the xattr value specified with the "--metadata" option cannot
# be set on the object. We confirmed this with the following versions:
# aws-cli/2.27.35 Python/3.13.3 Darwin/22.6.0 exe/x86_64
# We also tried "aws s3api put-object", but the result was the same.
#
# Since xattrs cannot be set on objects uploaded with the aws command, this
# check is skipped on macos.
# If a solution is found in the future, we will test it on macos as well.
#
if ! uname | grep -q Darwin; then
OBJECT_NAME=$(basename "${PWD}")/"${TEST_RMXATTR_FILE}"
s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-xattr: %7B%22key%22%3A%22dmFsdWU%3D%22%7D" < "${TEMP_DIR}/${BIG_FILE}"
del_xattr key "${TEST_RMXATTR_FILE}"
cmp "${TEST_RMXATTR_FILE}" "${TEMP_DIR}/${BIG_FILE}"
if find_xattr key "${TEST_RMXATTR_FILE}"; then
echo "could read xattr(key) value after removing it."
return 1
fi
fi
rm -f "${TEMP_DIR}/${BIG_FILE}"
rm -f "${TEST_CHMOD_FILE}"
@ -654,7 +703,7 @@ function test_multipart_mix {
# it may look pointless, but the file is copied here so that no cache remains.
#
cp "${TEMP_DIR}/${BIG_FILE}" "${TEMP_DIR}/${BIG_FILE}-mix"
cp "${BIG_FILE}" "${BIG_FILE}-mix"
cp_avoid_xattr_err "${BIG_FILE}" "${BIG_FILE}-mix"
local MODIFY_START_BLOCK=$((15*1024*1024/2/4))
echo -n "0123456789ABCDEF" | dd of="${BIG_FILE}-mix" bs=4 count=4 seek="${MODIFY_START_BLOCK}" conv=notrunc
@ -671,7 +720,7 @@ function test_multipart_mix {
# modify directly(over file end offset)
#
cp "${TEMP_DIR}/${BIG_FILE}" "${TEMP_DIR}/${BIG_FILE}-mix"
cp "${BIG_FILE}" "${BIG_FILE}-mix"
cp_avoid_xattr_err "${BIG_FILE}" "${BIG_FILE}-mix"
local OVER_FILE_BLOCK_POS=$((26*1024*1024/4))
echo -n "0123456789ABCDEF" | dd of="${BIG_FILE}-mix" bs=4 count=4 seek="${OVER_FILE_BLOCK_POS}" conv=notrunc
@ -687,7 +736,7 @@ function test_multipart_mix {
# (3) Writing from the 0th byte
#
cp "${TEMP_DIR}/${BIG_FILE}" "${TEMP_DIR}/${BIG_FILE}-mix"
cp "${BIG_FILE}" "${BIG_FILE}-mix"
cp_avoid_xattr_err "${BIG_FILE}" "${BIG_FILE}-mix"
echo -n "0123456789ABCDEF" | dd of="${BIG_FILE}-mix" bs=4 count=4 seek=0 conv=notrunc
echo -n "0123456789ABCDEF" | dd of="${TEMP_DIR}/${BIG_FILE}-mix" bs=4 count=4 seek=0 conv=notrunc
@ -703,7 +752,7 @@ function test_multipart_mix {
# modify directly(seek 1MB offset)
#
cp "${TEMP_DIR}/${BIG_FILE}" "${TEMP_DIR}/${BIG_FILE}-mix"
cp "${BIG_FILE}" "${BIG_FILE}-mix"
cp_avoid_xattr_err "${BIG_FILE}" "${BIG_FILE}-mix"
local MODIFY_START_BLOCK=$((1*1024*1024))
echo -n "0123456789ABCDEF" | dd of="${BIG_FILE}-mix" bs=4 count=4 seek="${MODIFY_START_BLOCK}" conv=notrunc
@ -784,18 +833,11 @@ function test_hardlink {
echo foo > "${TEST_TEXT_FILE}"
(
if ! uname | grep -q Darwin; then
set +o pipefail
ln "${TEST_TEXT_FILE}" "${ALT_TEST_TEXT_FILE}" 2>&1 | grep -q -e 'Operation not supported' -e 'Not supported'
else
# [macos] fuse-t
# Not error return code, and no stderr
#
ln "${TEST_TEXT_FILE}" "${ALT_TEST_TEXT_FILE}"
if stat "${ALT_TEST_TEXT_FILE}" >/dev/null 2>&1; then
exit 1
fi
fi
# [NOTE]
# macos-fuse-t returns 'Input/output error'
#
set +o pipefail
ln "${TEST_TEXT_FILE}" "${ALT_TEST_TEXT_FILE}" 2>&1 | grep -q -e 'Operation not supported' -e 'Not supported' -e 'Input/output error'
)
rm_test_file
@ -881,7 +923,7 @@ function test_mtime_file {
mk_test_file
#copy the test file with preserve mode
cp -p "${TEST_TEXT_FILE}" "${ALT_TEST_TEXT_FILE}"
cp_avoid_xattr_err -p "${TEST_TEXT_FILE}" "${ALT_TEST_TEXT_FILE}"
local testmtime; testmtime=$(get_mtime "${TEST_TEXT_FILE}")
local testctime; testctime=$(get_ctime "${TEST_TEXT_FILE}")
@ -919,7 +961,7 @@ function test_update_time_chmod() {
local t0=1000000000 # 9 September 2001
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo data | aws_cli s3 cp --metadata="atime=${t0},ctime=${t0},mtime=${t0}" - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo data | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}"
local base_atime; base_atime=$(get_atime "${TEST_TEXT_FILE}")
local base_ctime; base_ctime=$(get_ctime "${TEST_TEXT_FILE}")
local base_mtime; base_mtime=$(get_mtime "${TEST_TEXT_FILE}")
@ -946,7 +988,7 @@ function test_update_time_chown() {
#
local t0=1000000000 # 9 September 2001
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo data | aws_cli s3 cp --metadata="atime=${t0},ctime=${t0},mtime=${t0}" - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo data | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}"
local base_atime; base_atime=$(get_atime "${TEST_TEXT_FILE}")
local base_ctime; base_ctime=$(get_ctime "${TEST_TEXT_FILE}")
local base_mtime; base_mtime=$(get_mtime "${TEST_TEXT_FILE}")
@ -989,7 +1031,7 @@ function test_update_time_xattr() {
local t0=1000000000 # 9 September 2001
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo data | aws_cli s3 cp --metadata="atime=${t0},ctime=${t0},mtime=${t0}" - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo data | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}"
local base_atime; base_atime=$(get_atime "${TEST_TEXT_FILE}")
local base_ctime; base_ctime=$(get_ctime "${TEST_TEXT_FILE}")
local base_mtime; base_mtime=$(get_mtime "${TEST_TEXT_FILE}")
@ -1001,10 +1043,23 @@ function test_update_time_xattr() {
local atime; atime=$(get_atime "${TEST_TEXT_FILE}")
local ctime; ctime=$(get_ctime "${TEST_TEXT_FILE}")
local mtime; mtime=$(get_mtime "${TEST_TEXT_FILE}")
if [ "${base_atime}" != "${atime}" ] || [ "${base_ctime}" = "${ctime}" ] || [ "${base_mtime}" != "${mtime}" ]; then
echo "set_xattr expected updated ctime: $base_ctime != $ctime and same mtime: $base_mtime == $mtime, atime: $base_atime == $atime"
return 1
if ! uname | grep -q Darwin; then
if [ "${base_atime}" != "${atime}" ] || [ "${base_ctime}" = "${ctime}" ] || [ "${base_mtime}" != "${mtime}" ]; then
echo "set_xattr expected updated ctime: $base_ctime != $ctime and same mtime: $base_mtime == $mtime, atime: $base_atime == $atime"
return 1
fi
else
# [macos] fuse-t
# atime/ctime are all updated.
# see) https://github.com/macos-fuse-t/fuse-t/issues/87
#
if [ "${base_atime}" = "${atime}" ] || [ "${base_ctime}" = "${ctime}" ] || [ "${base_mtime}" != "${mtime}" ]; then
echo "set_xattr expected updated ctime: $base_ctime != $ctime, atime: $base_atime != $atime and same mtime: $base_mtime == $mtime"
return 1
fi
fi
rm_test_file
}
@ -1013,7 +1068,7 @@ function test_update_time_touch() {
local t0=1000000000 # 9 September 2001
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo data | aws_cli s3 cp --metadata="atime=${t0},ctime=${t0},mtime=${t0}" - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo data | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}"
local base_atime; base_atime=$(get_atime "${TEST_TEXT_FILE}")
local base_ctime; base_ctime=$(get_ctime "${TEST_TEXT_FILE}")
local base_mtime; base_mtime=$(get_mtime "${TEST_TEXT_FILE}")
@ -1037,7 +1092,7 @@ function test_update_time_touch_a() {
local t0=1000000000 # 9 September 2001
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo data | aws_cli s3 cp --metadata="atime=${t0},ctime=${t0},mtime=${t0}" - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo data | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}"
local base_atime; base_atime=$(get_atime "${TEST_TEXT_FILE}")
local base_ctime; base_ctime=$(get_ctime "${TEST_TEXT_FILE}")
local base_mtime; base_mtime=$(get_mtime "${TEST_TEXT_FILE}")
@ -1073,7 +1128,7 @@ function test_update_time_append() {
local t0=1000000000 # 9 September 2001
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo data | aws_cli s3 cp --metadata="atime=${t0},ctime=${t0},mtime=${t0}" - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo data | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}"
local base_atime; base_atime=$(get_atime "${TEST_TEXT_FILE}")
local base_ctime; base_ctime=$(get_ctime "${TEST_TEXT_FILE}")
local base_mtime; base_mtime=$(get_mtime "${TEST_TEXT_FILE}")
@ -1097,7 +1152,7 @@ function test_update_time_cp_p() {
local t0=1000000000 # 9 September 2001
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo data | aws_cli s3 cp --metadata="atime=${t0},ctime=${t0},mtime=${t0}" - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo data | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}"
local base_atime; base_atime=$(get_atime "${TEST_TEXT_FILE}")
local base_ctime; base_ctime=$(get_ctime "${TEST_TEXT_FILE}")
local base_mtime; base_mtime=$(get_mtime "${TEST_TEXT_FILE}")
@ -1106,7 +1161,7 @@ function test_update_time_cp_p() {
# cp -p -> update ctime, not update atime/mtime
#
local TIME_TEST_TEXT_FILE=test-s3fs-time.txt
cp -p "${TEST_TEXT_FILE}" "${TIME_TEST_TEXT_FILE}"
cp_avoid_xattr_err -p "${TEST_TEXT_FILE}" "${TIME_TEST_TEXT_FILE}"
local atime; atime=$(get_atime "${TIME_TEST_TEXT_FILE}")
local ctime; ctime=$(get_ctime "${TIME_TEST_TEXT_FILE}")
local mtime; mtime=$(get_mtime "${TIME_TEST_TEXT_FILE}")
@ -1123,7 +1178,7 @@ function test_update_time_mv() {
local t0=1000000000 # 9 September 2001
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo data | aws_cli s3 cp --metadata="atime=${t0},ctime=${t0},mtime=${t0}" - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo data | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}"
local base_atime; base_atime=$(get_atime "${TEST_TEXT_FILE}")
local base_ctime; base_ctime=$(get_ctime "${TEST_TEXT_FILE}")
local base_mtime; base_mtime=$(get_mtime "${TEST_TEXT_FILE}")
@ -1157,7 +1212,7 @@ function test_update_directory_time_chmod() {
#
local t0=1000000000 # 9 September 2001
local DIRECTORY_NAME; DIRECTORY_NAME=$(basename "${PWD}")/"${TEST_DIR}"
aws_cli s3api put-object --content-type="application/x-directory" --metadata="atime=${t0},ctime=${t0},mtime=${t0}" --bucket "${TEST_BUCKET_1}" --key "$DIRECTORY_NAME/"
s3_cp "${TEST_BUCKET_1}/${DIRECTORY_NAME}/" --header "Content-Type: application/x-directory" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}" < /dev/null
local base_atime; base_atime=$(get_atime "${TEST_DIR}")
local base_ctime; base_ctime=$(get_ctime "${TEST_DIR}")
@ -1183,7 +1238,7 @@ function test_update_directory_time_chown {
local t0=1000000000 # 9 September 2001
local DIRECTORY_NAME; DIRECTORY_NAME=$(basename "${PWD}")/"${TEST_DIR}"
aws_cli s3api put-object --content-type="application/x-directory" --metadata="atime=${t0},ctime=${t0},mtime=${t0}" --bucket "${TEST_BUCKET_1}" --key "$DIRECTORY_NAME/"
s3_cp "${TEST_BUCKET_1}/${DIRECTORY_NAME}/" --header "Content-Type: application/x-directory" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}" < /dev/null
local base_atime; base_atime=$(get_atime "${TEST_DIR}")
local base_ctime; base_ctime=$(get_ctime "${TEST_DIR}")
@ -1219,7 +1274,7 @@ function test_update_directory_time_set_xattr {
local t0=1000000000 # 9 September 2001
local DIRECTORY_NAME; DIRECTORY_NAME=$(basename "${PWD}")/"${TEST_DIR}"
aws_cli s3api put-object --content-type="application/x-directory" --metadata="atime=${t0},ctime=${t0},mtime=${t0}" --bucket "${TEST_BUCKET_1}" --key "$DIRECTORY_NAME/"
s3_cp "${TEST_BUCKET_1}/${DIRECTORY_NAME}/" --header "Content-Type: application/x-directory" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}" < /dev/null
local base_atime; base_atime=$(get_atime "${TEST_DIR}")
local base_ctime; base_ctime=$(get_ctime "${TEST_DIR}")
@ -1255,7 +1310,7 @@ function test_update_directory_time_touch {
local t0=1000000000 # 9 September 2001
local DIRECTORY_NAME; DIRECTORY_NAME=$(basename "${PWD}")/"${TEST_DIR}"
aws_cli s3api put-object --content-type="application/x-directory" --metadata="atime=${t0},ctime=${t0},mtime=${t0}" --bucket "${TEST_BUCKET_1}" --key "$DIRECTORY_NAME/"
s3_cp "${TEST_BUCKET_1}/${DIRECTORY_NAME}/" --header "Content-Type: application/x-directory" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}" < /dev/null
local base_atime; base_atime=$(get_atime "${TEST_DIR}")
local base_ctime; base_ctime=$(get_ctime "${TEST_DIR}")
@ -1280,7 +1335,7 @@ function test_update_directory_time_touch_a {
local t0=1000000000 # 9 September 2001
local DIRECTORY_NAME; DIRECTORY_NAME=$(basename "${PWD}")/"${TEST_DIR}"
aws_cli s3api put-object --content-type="application/x-directory" --metadata="atime=${t0},ctime=${t0},mtime=${t0}" --bucket "${TEST_BUCKET_1}" --key "$DIRECTORY_NAME/"
s3_cp "${TEST_BUCKET_1}/${DIRECTORY_NAME}/" --header "Content-Type: application/x-directory" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}" < /dev/null
local base_atime; base_atime=$(get_atime "${TEST_DIR}")
local base_ctime; base_ctime=$(get_ctime "${TEST_DIR}")
@ -2057,6 +2112,15 @@ function test_truncate_cache() {
for file in $(seq 75); do
touch "${dir}/${file}"
done
# FIXME:
# In the case of macos-fuse-t, if a wait is not inserted here, the following error may occur:
# "ls: fts_read: Input/output error"
# Currently, we have not found a solution to this problem.
# Please pay attention to future developments in macos-fuse-t.
#
wait_ostype 1 "Darwin"
ls "${dir}"
done
@ -2322,8 +2386,8 @@ function test_not_existed_dir_obj() {
#
local OBJECT_NAME_1; OBJECT_NAME_1="${DIR_NAME}/not_existed_dir_single/${TEST_TEXT_FILE}"
local OBJECT_NAME_2; OBJECT_NAME_2="${DIR_NAME}/not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}"
echo data1 | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME_1}"
echo data2 | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME_2}"
echo data1 | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME_1}"
echo data2 | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME_2}"
# Top directory
# shellcheck disable=SC2010
@ -2382,14 +2446,6 @@ function test_not_existed_dir_obj() {
rm -rf not_existed_dir_parent
}
function test_ut_ossfs {
describe "Testing ossfs python ut..."
# shellcheck disable=SC2153
export TEST_BUCKET_MOUNT_POINT="${TEST_BUCKET_MOUNT_POINT_1}"
../../ut_test.py
}
function test_cr_filename {
describe "Testing filename with CR code ..."
@ -2551,9 +2607,9 @@ function test_not_boundary_writes {
# Part number 2: 10,485,760 - 20,971,519 (size = 10MB)
# Part number 3: 20,971,520 - 26,214,399 (size = 5MB)
#
local BOUNDAY_TEST_FILE_SIZE; BOUNDAY_TEST_FILE_SIZE=$((BIG_FILE_BLOCK_SIZE * BIG_FILE_COUNT))
local BOUNDARY_TEST_FILE_SIZE; BOUNDARY_TEST_FILE_SIZE=$((BIG_FILE_BLOCK_SIZE * BIG_FILE_COUNT))
../../junk_data "${BOUNDAY_TEST_FILE_SIZE}" > "${TEST_TEXT_FILE}"
../../junk_data "${BOUNDARY_TEST_FILE_SIZE}" > "${TEST_TEXT_FILE}"
#
# Write in First boundary
@ -2687,7 +2743,7 @@ function test_file_names_longer_than_posix() {
fi
rm -f "${a256}"
echo data | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${DIR_NAME}/${a256}"
echo data | s3_cp "${TEST_BUCKET_1}/${DIR_NAME}/${a256}"
files=(*)
if [ "${#files[@]}" = 0 ]; then
echo "failed to list long file name"
@ -2704,10 +2760,10 @@ function test_statvfs() {
# but the order of Total/Used/Available size is the same.
#
local MOUNTPOINT_DIR; MOUNTPOINT_DIR=$(cd ..; pwd)
local DF_RESULT; DF_RESULT=$(df "${MOUNTPOINT_DIR}" 2>/dev/null | tail -n +2)
local TOTAL_SIZE; TOTAL_SIZE=$(echo "${DF_RESULT}" | awk '{print $2}')
local USED_SIZE; USED_SIZE=$(echo "${DF_RESULT}" | awk '{print $3}')
local AVAIL_SIZE; AVAIL_SIZE=$(echo "${DF_RESULT}" | awk '{print $4}')
local TOTAL_SIZE
local USED_SIZE
local AVAIL_SIZE
read -r _ TOTAL_SIZE USED_SIZE AVAIL_SIZE _ < <(df "${MOUNTPOINT_DIR}" 2>/dev/null | tail -n +2)
# [NOTE]
# In the disk information (statvfs) provided by s3fs, Total size and
@ -2719,6 +2775,124 @@ function test_statvfs() {
fi
}
function test_pjdfstest_chflags() {
describe "Testing the pjdfstest : chflags..."
prove -rv ../../pjdfstest/tests/chflags/*.t
}
function test_pjdfstest_chmod() {
describe "Testing the pjdfstest : chmod..."
prove -rv ../../pjdfstest/tests/chmod/0[4689].t \
../../pjdfstest/tests/chmod/10.t
}
function test_pjdfstest_chown() {
describe "Testing the pjdfstest : chown..."
prove -rv ../../pjdfstest/tests/chown/0[4689].t \
../../pjdfstest/tests/chown/10.t
}
function test_pjdfstest_ftruncate() {
describe "Testing the pjdfstest : ftruncate..."
prove -rv ../../pjdfstest/tests/ftruncate/0[147-9].t \
../../pjdfstest/tests/ftruncate/1[0134].t
}
function test_pjdfstest_granular() {
describe "Testing the pjdfstest : granular..."
prove -rv ../../pjdfstest/tests/granular/*.t
}
function test_pjdfstest_link() {
describe "Testing the pjdfstest : link..."
prove -rv ../../pjdfstest/tests/link/*.t
}
function test_pjdfstest_mkdir() {
describe "Testing the pjdfstest : mkdir..."
prove -rv ../../pjdfstest/tests/mkdir/0[347-9].t \
../../pjdfstest/tests/mkdir/1[12]*.t
}
function test_pjdfstest_mkfifo() {
describe "Testing the pjdfstest : mkfifo..."
prove -rv ../../pjdfstest/tests/mkfifo/0[3478].t \
../../pjdfstest/tests/mkfifo/1*.t
}
function test_pjdfstest_mknod() {
describe "Testing the pjdfstest : mknod..."
prove -rv ../../pjdfstest/tests/mknod/0[479].t \
../../pjdfstest/tests/mknod/10.t
}
function test_pjdfstest_open() {
describe "Testing the pjdfstest : open..."
prove -rv ../../pjdfstest/tests/open/0[49].t \
../../pjdfstest/tests/open/1*.t \
../../pjdfstest/tests/open/2[0-134].t
}
function test_pjdfstest_posix_fallocate() {
describe "Testing the pjdfstest : posix_fallocate..."
prove -rv ../../pjdfstest/tests/posix_fallocate/*.t
}
function test_pjdfstest_rename() {
describe "Testing the pjdfstest : rename..."
prove -rv ../../pjdfstest/tests/rename/0[2-36-8].t \
../../pjdfstest/tests/rename/1[15-9].t \
../../pjdfstest/tests/rename/22.t
}
function test_pjdfstest_rmdir() {
describe "Testing the pjdfstest : rmdir..."
# TODO: explain exclusions
# fails with -o use_cache: ../../pjdfstest/tests/rmdir/01.t
prove -rv ../../pjdfstest/tests/rmdir/0[3-59].t \
../../pjdfstest/tests/rmdir/1[02-5].t
}
function test_pjdfstest_symlink() {
describe "Testing the pjdfstest : symlink..."
prove -rv ../../pjdfstest/tests/symlink/0[13479].t \
../../pjdfstest/tests/symlink/1*.t
}
function test_pjdfstest_truncate() {
describe "Testing the pjdfstest : truncate..."
prove -rv ../../pjdfstest/tests/truncate/0[147-9].t \
../../pjdfstest/tests/truncate/1[0134].t
}
function test_pjdfstest_unlink() {
describe "Testing the pjdfstest : unlink..."
prove -rv ../../pjdfstest/tests/unlink/0[47-8].t \
../../pjdfstest/tests/unlink/1[02-4].t
}
function test_pjdfstest_utimensat() {
describe "Testing the pjdfstest : utimensat..."
prove -rv ../../pjdfstest/tests/utimensat/0[1-58-9].t
}
function add_all_tests {
if s3fs_args | grep -q use_cache; then
add_tests test_cache_file_stat
@ -2756,15 +2930,27 @@ function add_all_tests {
add_tests test_rename_before_close
add_tests test_multipart_upload
add_tests test_multipart_copy
add_tests test_multipart_mix
if ! uname | grep -q Darwin || ! s3fs_args | grep -q nocopyapi; then
# FIXME:
# If you specify the nocopyapi option with macos-fuse-t, the following error will
# occur when manipulating the xattr of the copied object:
# "could not copy extended attributes to <file>: Result too large"
# As no solution has been found at this time, this test is bypassed on macos with
# nocopyapi.
# Please pay attention to future developments in macos-fuse-t.
#
add_tests test_multipart_mix
fi
add_tests test_utimens_during_multipart
add_tests test_special_characters
add_tests test_hardlink
add_tests test_symlink
if ! uname | grep -q Darwin; then
add_tests test_mknod
add_tests test_extended_attributes
fi
add_tests test_extended_attributes
add_tests test_mtime_file
add_tests test_update_time_chmod
@ -2780,15 +2966,30 @@ function add_all_tests {
add_tests test_update_directory_time_chmod
add_tests test_update_directory_time_chown
add_tests test_update_directory_time_set_xattr
add_tests test_update_directory_time_touch
if ! mount -t fuse.s3fs | grep "$TEST_BUCKET_MOUNT_POINT_1 " | grep -q -e noatime -e relatime ; then
add_tests test_update_directory_time_touch_a
fi
add_tests test_update_directory_time_subdir
if ! uname | grep -q Darwin; then
# FIXME:
# These tests fail with macos-fuse-t because mtime/ctime/atime are not updated.
# This is not an issue in s3fs itself, so these tests are bypassed on macos.
# Please pay attention to future developments in macos-fuse-t.
#
add_tests test_update_directory_time_set_xattr
add_tests test_update_directory_time_subdir
fi
add_tests test_update_chmod_opened_file
if s3fs_args | grep -q update_parent_dir_stat; then
add_tests test_update_parent_directory_time
if ! uname | grep -q Darwin; then
# FIXME:
# In macos-fuse-t, this test can sometimes succeed if the test waits for more
# than one second while it is processing.
# However, the results are currently unstable, thus this test is bypassed on macos.
# Please pay attention to future developments in macos-fuse-t.
#
add_tests test_update_parent_directory_time
fi
fi
if ! s3fs_args | grep -q use_xattr; then
add_tests test_posix_acl
@ -2812,7 +3013,6 @@ function add_all_tests {
if ! test -f /etc/os-release || ! grep -q -i -e 'ID=alpine' -e 'ID="alpine"' /etc/os-release; then
add_tests test_not_existed_dir_obj
fi
add_tests test_ut_ossfs
add_tests test_cr_filename
if ! s3fs_args | grep -q ensure_diskfree && ! uname | grep -q Darwin; then
add_tests test_ensurespace_move_file
@ -2827,6 +3027,36 @@ function add_all_tests {
# add_tests test_chown_mountpoint
add_tests test_time_mountpoint
add_tests test_statvfs
if ! uname | grep -q Darwin; then
add_tests test_pjdfstest_chflags
add_tests test_pjdfstest_chmod
add_tests test_pjdfstest_chown
add_tests test_pjdfstest_ftruncate
add_tests test_pjdfstest_granular
add_tests test_pjdfstest_link
add_tests test_pjdfstest_mknod
add_tests test_pjdfstest_open
add_tests test_pjdfstest_posix_fallocate
add_tests test_pjdfstest_truncate
add_tests test_pjdfstest_unlink
add_tests test_pjdfstest_utimensat
# [NOTE][TODO]
# Temporary error workaround in Ubuntu 25.10
# In Ubuntu 25.10, there are test cases where the request header size exceeds
# 8192 bytes.
# The tests below are currently bypassed because s3proxy returns an error response.
# If s3proxy increases the allowed header size, we will resume these tests.
#
if ! ( . /etc/os-release 2>/dev/null && [ "${ID}" = "ubuntu" ] && [ "${VERSION_ID}" = "25.10" ] ); then
add_tests test_pjdfstest_mkdir
add_tests test_pjdfstest_mkfifo
add_tests test_pjdfstest_rename
add_tests test_pjdfstest_rmdir
add_tests test_pjdfstest_symlink
fi
fi
}
init_suite


@ -0,0 +1,48 @@
#!/usr/bin/env python3
#
# Locate reads from std::map / std::unordered_map via operator []
#
# Example usage of this addon (scan a source file main.cpp)
# cppcheck --dump main.cpp
# python map-subscript-read.py main.cpp.dump
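#
# Illustrative example of the pattern this addon flags (not taken from the
# s3fs sources; shown only to clarify the check). Reading through operator[]
# default-inserts a value when the key is missing, which is usually unintended:
#     std::map<std::string, int> m;
#     int v = m["missing"];   // flagged: mapSubscriptRead
#     m["missing"] = 1;       // not flagged: plain write through operator[]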
import cppcheckdata
import sys
DEBUG = ('-debug' in sys.argv)
def reportError(token, severity, msg, id):
    cppcheckdata.reportError(token, severity, msg, 'map', id)

def simpleMatch(token, pattern):
    return cppcheckdata.simpleMatch(token, pattern)

def check_map_subscript(data):
    #if data.language != 'cpp':
    #    return
    for cfg in data.iterconfigurations():
        for token in cfg.tokenlist:
            if token.str != '[' or token.astOperand1 is None or token.astOperand2 is None:
                continue
            if token.astParent and token.astParent.str == '=' and token.astParent.astOperand1 == token:
                continue
            m = token.astOperand1
            if m.variable is None:
                continue
            if simpleMatch(m.variable.typeStartToken, 'std :: map <'):
                reportError(token, 'style', 'Reading from std::map with subscript operator [].', 'mapSubscriptRead')
            elif simpleMatch(m.variable.typeStartToken, 'std :: unordered_map <'):
                reportError(token, 'style', 'Reading from std::unordered_map with subscript operator [].', 'mapSubscriptRead')

for arg in sys.argv[1:]:
    if arg == '--cli':
        continue
    data = cppcheckdata.CppcheckData(arg)
    check_map_subscript(data)

sys.exit(cppcheckdata.EXIT_CODE)


@ -47,15 +47,15 @@ ALLYES="no"
DIRPARAM=""
while [ "$1" != "" ]; do
if [ "X$1" = "X-help" ] || [ "X$1" = "X-h" ] || [ "X$1" = "X-H" ]; then
if [ "$1" = "-help" ] || [ "$1" = "-h" ] || [ "$1" = "-H" ]; then
UsageFunction "${OWNNAME}"
exit 0
elif [ "X$1" = "X-y" ] || [ "X$1" = "X-Y" ]; then
elif [ "$1" = "-y" ] || [ "$1" = "-Y" ]; then
AUTOYES="yes"
elif [ "X$1" = "X-all" ] || [ "X$1" = "X-ALL" ]; then
elif [ "$1" = "-all" ] || [ "$1" = "-ALL" ]; then
ALLYES="yes"
else
if [ "X$DIRPARAM" != "X" ]; then
if [ "$DIRPARAM" != "" ]; then
echo "*** Input error."
echo ""
UsageFunction "${OWNNAME}"
@ -65,7 +65,7 @@ while [ "$1" != "" ]; do
fi
shift
done
if [ "X$DIRPARAM" = "X" ]; then
if [ "$DIRPARAM" = "" ]; then
echo "*** Input error."
echo ""
UsageFunction "${OWNNAME}"
@ -138,11 +138,11 @@ for DIR in $DIRLIST; do
if [ "${AUTOYES}" = "yes" ]; then
ANSWER="y"
fi
while [ "X${ANSWER}" != "XY" ] && [ "X${ANSWER}" != "Xy" ] && [ "X${ANSWER}" != "XN" ] && [ "X${ANSWER}" != "Xn" ]; do
while [ "${ANSWER}" != "Y" ] && [ "${ANSWER}" != "y" ] && [ "${ANSWER}" != "N" ] && [ "${ANSWER}" != "n" ]; do
printf "%s" "Do you merge ${DIR} ? (y/n): "
read -r ANSWER
done
if [ "X${ANSWER}" != "XY" ] && [ "X${ANSWER}" != "Xy" ]; then
if [ "${ANSWER}" != "Y" ] && [ "${ANSWER}" != "y" ]; then
continue
fi


@ -26,7 +26,7 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#ifndef __APPLE__
#if !defined(__APPLE__) && !defined(__FreeBSD__)
#include <sys/sysmacros.h>
#endif


@ -6,6 +6,6 @@ s3proxy.credential=local-credential
s3proxy.keystore-path=/tmp/keystore.jks
s3proxy.keystore-password=password
jclouds.provider=transient
jclouds.provider=transient-nio2
jclouds.identity=remote-identity
jclouds.credential=remote-credential


@ -20,7 +20,7 @@
#
#
# This is unsupport sample deleting cache files script.
# This is an unsupported sample script for deleting cache files.
# As s3fs's local cache files (stats and objects) grow,
# you need to delete them.
# This script deletes these files with a total size limit
@ -48,11 +48,11 @@ func_usage()
PRGNAME=$(basename "$0")
if [ "X$1" = "X-h" ] || [ "X$1" = "X-H" ]; then
if [ "$1" = "-h" ] || [ "$1" = "-H" ]; then
func_usage "${PRGNAME}"
exit 0
fi
if [ "X$1" = "X" ] || [ "X$2" = "X" ] || [ "X$3" = "X" ]; then
if [ "$1" = "" ] || [ "$2" = "" ] || [ "$3" = "" ]; then
func_usage "${PRGNAME}"
exit 1
fi
@ -61,7 +61,7 @@ BUCKET="$1"
CDIR="$2"
LIMIT="$3"
SILENT=0
if [ "X$4" = "X-silent" ]; then
if [ "$4" = "-silent" ]; then
SILENT=1
fi
FILES_CDIR="${CDIR}/${BUCKET}"


@ -69,8 +69,8 @@ fi
start_s3proxy
if ! aws_cli s3api head-bucket --bucket "${TEST_BUCKET_1}" --region "${S3_ENDPOINT}"; then
aws_cli s3 mb "s3://${TEST_BUCKET_1}" --region "${S3_ENDPOINT}"
if ! s3_head "${TEST_BUCKET_1}"; then
s3_mb "${TEST_BUCKET_1}"
fi
for flag in "${FLAGS[@]}"; do


@ -49,7 +49,6 @@ BIG_FILE_LENGTH=$((BIG_FILE_BLOCK_SIZE * BIG_FILE_COUNT))
# Set locale because some tests check for English expressions
export LC_ALL=en_US.UTF-8
export RUN_DIR
# [NOTE]
# stdbuf, truncate and sed installed on macos do not work as
@ -59,15 +58,24 @@ export RUN_DIR
# Set your PATH appropriately so that you can find these commands.
#
if [ "$(uname)" = "Darwin" ]; then
export STDBUF_BIN="gstdbuf"
# [NOTE][TODO]
# On macos-14 (and possibly later), coreutils' gstdbuf currently does not
# work on the GitHub Actions runner.
# This is because libstdbuf.so is built for arm64 while arm64e is required.
# To handle this, we avoid calling stdbuf. This can result in mixed log
# output, but there is currently no other workaround.
#
if lipo -archs /opt/homebrew/Cellar/coreutils/9.8/libexec/coreutils/libstdbuf.so 2>/dev/null | grep -q 'arm64e'; then
export STDBUF_BIN="gstdbuf"
else
export STDBUF_BIN=""
fi
export TRUNCATE_BIN="gtruncate"
export SED_BIN="gsed"
export BASE64_BIN="gbase64"
else
export STDBUF_BIN="stdbuf"
export TRUNCATE_BIN="truncate"
export SED_BIN="sed"
export BASE64_BIN="base64"
fi
export SED_BUFFER_FLAG="--unbuffered"
@ -81,6 +89,18 @@ else
STAT_BIN=(stat)
fi
function find_xattr() {
if [ "$(uname)" = "Darwin" ]; then
local LIST_XATTRS_KEYVALS; LIST_XATTRS_KEYVALS=$(xattr -l "$2" 2>/dev/null)
if ! echo "${LIST_XATTRS_KEYVALS}" | grep -q "$1"; then
return 1
fi
else
getfattr --absolute-names -n "$1" "$2" >/dev/null 2>&1 || return 1
fi
return 0
}
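# [NOTE]
# Usage sketch (illustrative): find_xattr returns 0 when the named xattr
# exists on the file, so it can be used directly in a condition, e.g.:
#     if find_xattr key "${TEST_TEXT_FILE}"; then echo "xattr 'key' is present"; fi
#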
function get_xattr() {
if [ "$(uname)" = "Darwin" ]; then
xattr -p "$1" "$2"
@ -125,20 +145,21 @@ function check_file_size() {
local FILE_NAME="$1"
local EXPECTED_SIZE="$2"
# Verify file is zero length via metadata
# Verify file length via metadata
local size
size=$(get_size "${FILE_NAME}")
if [ "${size}" -ne "${EXPECTED_SIZE}" ]
then
echo "error: expected ${FILE_NAME} to be zero length"
echo "error: expected ${FILE_NAME} to be ${EXPECTED_SIZE} length but was ${size} via metadata"
return 1
fi
# Verify file is zero length via data
size=$(wc -c < "${FILE_NAME}")
# Verify file length via data
# shellcheck disable=SC2002
size=$(cat "${FILE_NAME}" | wc -c)
if [ "${size}" -ne "${EXPECTED_SIZE}" ]
then
echo "error: expected ${FILE_NAME} to be ${EXPECTED_SIZE} length, got ${size}"
echo "error: expected ${FILE_NAME} to be ${EXPECTED_SIZE} length, got ${size} via data"
return 1
fi
}
@ -190,11 +211,10 @@ function rm_test_dir {
}
# Create and cd to a unique directory for this test run
# Sets RUN_DIR to the name of the created directory
function cd_run_dir {
if [ "${TEST_BUCKET_MOUNT_POINT_1}" = "" ]; then
echo "TEST_BUCKET_MOUNT_POINT_1 variable not set"
exit 1
return 1
fi
local RUN_DIR="${TEST_BUCKET_MOUNT_POINT_1}/${1}"
mkdir -p "${RUN_DIR}"
@ -202,6 +222,12 @@ function cd_run_dir {
}
function clean_run_dir {
if [ "${TEST_BUCKET_MOUNT_POINT_1}" = "" ]; then
echo "TEST_BUCKET_MOUNT_POINT_1 variable not set"
return 1
fi
local RUN_DIR="${TEST_BUCKET_MOUNT_POINT_1}/${1}"
if [ -d "${RUN_DIR}" ]; then
rm -rf "${RUN_DIR}" || echo "Error removing ${RUN_DIR}"
fi
@ -247,7 +273,11 @@ function describe {
function run_suite {
orig_dir="${PWD}"
key_prefix="testrun-${RANDOM}"
cd_run_dir "${key_prefix}"
if ! cd_run_dir "${key_prefix}"; then
return 1
fi
for t in "${TEST_LIST[@]}"; do
# Ensure test input name differs every iteration
TEST_TEXT_FILE="test-s3fs-${RANDOM}.txt"
@ -274,8 +304,11 @@ function run_suite {
fi
set -o errexit
done
cd "${orig_dir}"
clean_run_dir
if ! clean_run_dir "${key_prefix}"; then
return 1
fi
for t in "${TEST_PASSED_LIST[@]}"; do
echo "PASS: ${t}"
@ -296,10 +329,26 @@ function run_suite {
fi
}
# [TODO]
# Temporary Solution for Ubuntu 25.10 Only
#
# As of October 2025, the stat command from uutils coreutils (Rust) in Ubuntu 25.10
# truncates the fractional seconds when retrieving atime/ctime individually.
# We work around this here.
# We will revert this once the issue is fixed.
#
TIME_FROM_FULL_STAT=$([ -f /etc/os-release ] && awk '/^ID=ubuntu/{os=1} /^VERSION_ID="25.10"/{version=1} END{print (os && version)}' /etc/os-release || echo 0)
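# [NOTE]
# Illustrative GNU-style `stat` output that the workaround below parses
# (format assumed; the exact lines may differ by stat version and locale):
#     Access: 2025-10-16 06:50:04.019784214 +0900
#     Change: 2025-10-16 06:50:04.019784214 +0900
# The whole seconds are re-derived with `date -d "<date> <time>"`, and the
# nanoseconds are taken from the part after the '.' in the time field.
#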
function get_ctime() {
# ex: "1657504903.019784214"
if [ "$(uname)" = "Darwin" ]; then
"${STAT_BIN[@]}" -f "%Fc" "$1"
elif [ "${TIME_FROM_FULL_STAT}" -eq 1 ]; then
TEMP_ATIME=$(stat "$1" | grep '^Change:' | awk '{print $2" "$3}')
TEMP_ATIME_SEC=$(date -d "${TEMP_ATIME}" +"%s")
TEMP_ATIME_NSEC=$(stat "$1" | awk '/^Change:/{print $3}' | cut -d'.' -f2)
printf '%s.%s' "${TEMP_ATIME_SEC}" "${TEMP_ATIME_NSEC}"
else
"${STAT_BIN[@]}" --format "%.9Z" "$1"
fi
@ -318,6 +367,12 @@ function get_atime() {
# ex: "1657504903.019784214"
if [ "$(uname)" = "Darwin" ]; then
"${STAT_BIN[@]}" -f "%Fa" "$1"
elif [ "${TIME_FROM_FULL_STAT}" -eq 1 ]; then
TEMP_ATIME=$(stat "$1" | grep '^Access:' | grep -v 'Uid:' | awk '{print $2" "$3}')
TEMP_ATIME_SEC=$(date -d "${TEMP_ATIME}" +"%s")
TEMP_ATIME_NSEC=$(stat "$1" | grep -v 'Uid:' | awk '/^Access:/{print $3}' | cut -d'.' -f2)
printf '%s.%s' "${TEMP_ATIME_SEC}" "${TEMP_ATIME_NSEC}"
else
"${STAT_BIN[@]}" --format "%.9X" "$1"
fi
@ -341,7 +396,10 @@ function get_user_and_group() {
function check_content_type() {
local INFO_STR
INFO_STR=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "$1" | jq -r .ContentType)
TEMPNAME="$(mktemp)"
s3_head "${TEST_BUCKET_1}/$1" --dump-header "$TEMPNAME"
INFO_STR=$(sed -n 's/^Content-Type: //pi' "$TEMPNAME" | tr -d '\r\n')
rm -f "$TEMPNAME"
if [ "${INFO_STR}" != "$2" ]
then
echo "Expected Content-Type: $2 but got: ${INFO_STR}"
@ -351,30 +409,38 @@ function check_content_type() {
function get_disk_avail_size() {
local DISK_AVAIL_SIZE
DISK_AVAIL_SIZE=$(BLOCKSIZE=$((1024 * 1024)) df "$1" | awk '{print $4}' | tail -n 1)
read -r _ _ _ DISK_AVAIL_SIZE _ < <(BLOCKSIZE=$((1024 * 1024)) df "$1" | tail -n 1)
echo "${DISK_AVAIL_SIZE}"
}
function aws_cli() {
local FLAGS=""
if [ -n "${S3FS_PROFILE}" ]; then
FLAGS="--profile ${S3FS_PROFILE}"
fi
function s3_head() {
local S3_PATH=$1
shift
curl --aws-sigv4 "aws:amz:$S3_ENDPOINT:s3" --user "$AWS_ACCESS_KEY_ID:$AWS_SECRET_ACCESS_KEY" \
--cacert "$S3PROXY_CACERT_FILE" --fail --silent \
"$@" \
--head "$S3_URL/$S3_PATH"
}
if [ "$1" = "s3" ] && [ "$2" != "ls" ] && [ "$2" != "mb" ]; then
if s3fs_args | grep -q use_sse=custom; then
FLAGS="${FLAGS} --sse-c AES256 --sse-c-key fileb:///tmp/ssekey.bin"
fi
elif [ "$1" = "s3api" ] && [ "$2" != "head-bucket" ]; then
if s3fs_args | grep -q use_sse=custom; then
FLAGS="${FLAGS} --sse-customer-algorithm AES256 --sse-customer-key $(cat /tmp/ssekey) --sse-customer-key-md5 $(cat /tmp/ssekeymd5)"
fi
fi
function s3_mb() {
local S3_BUCKET=$1
curl --aws-sigv4 "aws:amz:$S3_ENDPOINT:s3" --user "$AWS_ACCESS_KEY_ID:$AWS_SECRET_ACCESS_KEY" \
--cacert "$S3PROXY_CACERT_FILE" --fail --silent \
--request PUT "$S3_URL/$S3_BUCKET"
}
# [NOTE]
# AWS_EC2_METADATA_DISABLED for preventing the metadata service(to 169.254.169.254).
# shellcheck disable=SC2086,SC2068
AWS_EC2_METADATA_DISABLED=true aws $@ --endpoint-url "${S3_URL}" --ca-bundle /tmp/keystore.pem ${FLAGS}
function s3_cp() {
local S3_PATH=$1
shift
TEMPNAME="$(mktemp)"
cat > "$TEMPNAME"
# TODO: use filenames instead of stdin?
curl --aws-sigv4 "aws:amz:$S3_ENDPOINT:s3" --user "$AWS_ACCESS_KEY_ID:$AWS_SECRET_ACCESS_KEY" \
--cacert "$S3PROXY_CACERT_FILE" --fail --silent \
--header "Content-Length: $(wc -c < "$TEMPNAME")" \
"$@" \
--request PUT --data-binary "@$TEMPNAME" "$S3_URL/$S3_PATH"
rm -f "$TEMPNAME"
}
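# [NOTE]
# Minimal usage sketch of the curl-based helpers above (illustrative; the
# object key is a placeholder):
#     s3_mb "${TEST_BUCKET_1}"                                  # create the bucket (PUT)
#     echo "data" | s3_cp "${TEST_BUCKET_1}/dir/file.txt"       # upload stdin (PUT)
#     echo "data" | s3_cp "${TEST_BUCKET_1}/dir/file.txt" --header "x-amz-meta-mtime: 0"
#     s3_head "${TEST_BUCKET_1}/dir/file.txt"                   # HEAD the object
#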
function wait_for_port() {
@ -390,22 +456,6 @@ function wait_for_port() {
done
}
function make_random_string() {
if [ -n "$1" ]; then
local END_POS="$1"
else
local END_POS=8
fi
if [ "$(uname)" = "Darwin" ]; then
local BASE64_OPT="--break=0"
else
local BASE64_OPT="--wrap=0"
fi
"${BASE64_BIN}" "${BASE64_OPT}" < /dev/urandom 2>/dev/null | tr -d /+ | head -c "${END_POS}"
return 0
}
function s3fs_args() {
if [ "$(uname)" = "Darwin" ]; then
ps -o args -p "${S3FS_PID}" | tail -n +2
@ -433,6 +483,39 @@ function wait_ostype() {
fi
}
#
# Avoid extended attribute errors when copying on macos(fuse-t)
#
# [NOTE][FIXME]
# This avoids an error that occurs when copying (cp command) on macos
# (fuse-t), where cp reports that extended attributes cannot be copied
# and exits with a non-zero code.
# Even if this error occurs, the copy itself is successful.
#
# This issue is currently (2024/11/7) still in the process of being
# fixed, so we will wait and see.
# This issue only occurred in the test_multipart_mix test with the
# nocopyapi option, but on the macos-13 GitHub Actions runner it also
# occurs in some tests that use the use_xattr option.
#
function cp_avoid_xattr_err() {
if ! uname | grep -q Darwin; then
# Propagate cp's own exit status on failure
cp "$@" || return $?
else
local CP_RESULT=""
local CP_EXITCODE=0
# Capture cp's exit status before it is lost, then tolerate only the
# known "Result too large" xattr error from macos(fuse-t).
CP_RESULT=$(cp "$@" 2>&1) || CP_EXITCODE=$?
if [ "${CP_EXITCODE}" -ne 0 ]; then
if ! echo "${CP_RESULT}" | grep -q -i "Result too large"; then
return "${CP_EXITCODE}"
fi
echo "[FIXME: MACOS] ${CP_RESULT}"
fi
fi
return 0
}
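#
# Usage sketch (illustrative): used as a drop-in replacement for cp, e.g.:
#     cp_avoid_xattr_err -p "${TEST_TEXT_FILE}" "${ALT_TEST_TEXT_FILE}"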
#
# Local variables:
# tab-width: 4

Some files were not shown because too many files have changed in this diff.