Compare commits
263 Commits
SHA1 of each compared commit:

70a30d6e26 b97fd470a5 4d7fd60305 da38dc73ad e89adf6633 fa2bcfc60d ed1d431a1f 67442cf054
a7186b6072 517574c40c 5e6f21a9ff 54aa278df0 2f9fb74a42 b82632547c e3b50ad3e1 b139507ae6
feb0845103 f041812939 2b57e74330 b671fa7a9c 691669749e 10a72bfd0f 43f81b76af 68bbfee8ea
ec8caf64b8 bcacca6599 4fdd2456d4 3f6e8a8707 4845831f93 919575f312 0cd73e406d 807ec1f6f7
e2ac9b45e8 b15ed13807 f9d3941d9d 34c379babb 7b5111c955 a3964b3fcd 3856637cd2 2871975d1e
d5dd17644d 5e5b4f0a49 e5b15bed7d 2e4a6928c3 1aa77f6cda d0c4b5c763 361e10d09c 95cfbe30ed
87b8bafaea 1a703e623a 1ebefca029 ffff26e165 61df7bf42c c5fb42ff10 cbc33cd7ae 645c10a3c3
54293a66b3 01b3caa38c 64642e1d1b 546cdd0d91 a83f4a48d0 99d3e68d59 01189e99fc f493cb5846
e9814b4a4d 3b12aaf2ab 7e20278489 3d73d5a687 fa3a472c6b 5f38301861 4d5632912a a74034a012
3f64c72c24 68c45ce791 e8cb6d6d34 a2f2f72aaf 7bb9609827 82107f4b6c ee49ca4abf 48548f0896
981e97ee76 a568aa70fd 218adcb29b 6c55bcfdd8 8d04ee3e01 6781ef5bd1 7e94b64ae7 64a142723a
50f6c38c84 9fb4c32c6a 280ed5d706 2518ff3568 c65ce8a42c e5986d0034 b2bb12fd2c 7f30353fb9
235bccced5 67e6b9e495 ea42911530 6823c5a7ec d1272d296a d2a571a868 d120e54284 3a6af38582
e157d811cb 56a4e67009 5b93765802 acea1d33f9 528a61718d c5a75a1fb2 3790a0f8b4 779afe5d62
26b5658d70 c568a69452 13ad53eef7 b14758baff b5c3fc0a08 b29f8d0f2b 5699875e30 3081e419e1
a7b38a6940 1f04165a33 36db898d01 38a1ff42e5 a4a2841c05 0ece204393 6344d74ae3 faec0d9d15
e14a2eb94b cb3dc28e6e 38dc65180b 2405706643 5371cd1468 7978395083 d0a944fcaa 537384e9b5
e650d8c55c 9663215bb4 b2537052ef 2e51908bec a8edbd8622 06d0852e74 03066ea29a e66c9a82a2
e86e6cf24f 7e8238abc0 6448c8f1a8 3b6688253f 45e7cd085a 7c9cf84316 580775b47c eab26a1e01
1910856c6c 4b3e715291 6ca5a24a7f 0d6b02090e 5b487f651a e7a364d610 161be3ff33 1fc0e52dc3
7bf4ca1837 59c3b26655 8296fe32cb 4a15699669 24b990d899 ca9a257eec 6d4bb59865 9b75abfbe6
c4f95f14cb 9c74014443 a25cb9e07a 2cd7c94653 9648eba5bb 18495c44aa 4f22354aae 9d00b8d4a8
aeacd0a7d3 526700f2de a25a9450a6 f8a825e9b9 dbddd762a9 e715b77307 d1388ff446 38e8a830c9
7605c2e8fb 16bc44948e e4f85c1e08 222110e153 a40004f9cc 0ba49518e9 14eb1a7fd8 23a8124c51
f5af9dc4e2 d8e4e34b74 d98fdf4b2e 4e5f17e907 38cdaeb191 956eb77369 465986e397 23a5583a7f
aec7efc3af 30353f1a83 6f4bf55d5e 2e77920943 4a813aec42 4304ec63bb 6e89e69bba 9a4282208e
238fc0799e 5b95a0fcb6 1946ac415a 2186317676 5ab1037094 f6d7ff1084 3e242d0bad c491fbeabc
e654e8ec8a 48e9e51f4f 5a2172dc56 07535ec3ec b8dd466988 01a92476e6 3928a7e359 f9f614a474
e30a5939d0 4b2f3fecb5 ccfc119e45 11adf11957 38b5018bab 404c284440 e0655008b3 22f2392fca
136c5ec653 faddb4900f 6ca7d5ec27 6f679a9e78 b0eeaa6679 d22e1dc018 3a0799ec18 4e163b2888
86da2eed3a e7ed01b35f 4d303caa62 3f55c98a3f 2723e1049e f11eb7d69b 73b49c1038 4bec68713a
8b90cd6ba1 ac72bf34dd a282cb7a84 b52f916af6 ec7810f08e 904682b856 92fd5bc3e1 d75c6d6538
a30beded1c df7bbb28d5 dc40f16161 1abfb7e965 d2d75787d2 5c57e17b77 deaa85c40e
.clang-tidy (40 changed lines)

@@ -1,12 +1,37 @@
WarningsAsErrors: '*'
Checks: '
-*,
bugprone-*,
-bugprone-assignment-in-if-condition,
-bugprone-branch-clone,
-bugprone-easily-swappable-parameters,
-bugprone-implicit-widening-of-multiplication-result,
-bugprone-macro-parentheses,
-bugprone-narrowing-conversions,
-bugprone-unhandled-self-assignment,
cert-*,
-cert-dcl50-cpp,
-cert-env33-c,
-cert-err33-c,
-cert-err58-cpp,
cppcoreguidelines-*,
-cppcoreguidelines-avoid-c-arrays,
-cppcoreguidelines-avoid-do-while,
-cppcoreguidelines-avoid-magic-numbers,
-cppcoreguidelines-avoid-non-const-global-variables,
-cppcoreguidelines-init-variables,
-cppcoreguidelines-macro-usage,
-cppcoreguidelines-narrowing-conversions,
-cppcoreguidelines-no-malloc,
-cppcoreguidelines-owning-memory,
-cppcoreguidelines-pro-bounds-array-to-pointer-decay,
-cppcoreguidelines-pro-bounds-constant-array-index,
-cppcoreguidelines-pro-bounds-pointer-arithmetic,
-cppcoreguidelines-pro-type-const-cast,
-cppcoreguidelines-pro-type-member-init,
-cppcoreguidelines-pro-type-reinterpret-cast,
-cppcoreguidelines-pro-type-union-access,
-cppcoreguidelines-pro-type-vararg,
google-*,
-google-build-using-namespace,
-google-readability-casting,
@@ -15,28 +40,37 @@ Checks: '
-google-runtime-int,
-google-runtime-references,
misc-*,
-misc-const-correctness,
-misc-include-cleaner,
-misc-no-recursion,
-misc-redundant-expression,
-misc-unused-parameters,
-misc-use-anonymous-namespace,
modernize-*,
-modernize-avoid-c-arrays,
-modernize-deprecated-headers,
-modernize-loop-convert,
-modernize-make-unique,
-modernize-raw-string-literal,
-modernize-return-braced-init-list,
-modernize-use-auto,
-modernize-use-nullptr,
-modernize-use-default-member-init,
-modernize-use-trailing-return-type,
-modernize-use-using,
performance-*,
-performance-avoid-endl,
-performance-no-int-to-ptr,
portability-*,
readability-*,
-readability-braces-around-statements,
-readability-else-after-return,
-readability-function-cognitive-complexity,
-readability-function-size,
-readability-identifier-length,
-readability-implicit-bool-conversion,
-readability-inconsistent-declaration-parameter-name,
-readability-isolate-declaration,
-readability-magic-numbers,
-readability-named-parameter,
-readability-simplify-boolean-expr'
-readability-redundant-declaration,
-readability-simplify-boolean-expr,
-readability-suspicious-call-argument'
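For orientation, a configuration like the one above is picked up automatically by clang-tidy when it is run from the source tree; the CI workflow further below drives it through make targets. A minimal local sketch, assuming the build has already been configured so those targets exist:

```sh
# Run the same clang-tidy targets the CI job uses; the repository's
# .clang-tidy file is found automatically by walking up from each source file.
make -C src/ clang-tidy
make -C test/ clang-tidy
```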
.github/ISSUE_TEMPLATE.md (44 changed lines)

@@ -1,28 +1,40 @@
<!-- --------------------------------------------------------------------------
The following information is very important in order to help us to help you.
Omission of the following details may delay your support request or receive no
attention at all.
Keep in mind that the commands we provide to retrieve information are oriented
to GNU/Linux Distributions, so you could need to use others if you use s3fs on
macOS or BSD.
--------------------------------------------------------------------------- -->

### Additional Information
_The following information is very important in order to help us to help you. Omission of the following details may delay your support request or receive no attention at all._
_Keep in mind that the commands we provide to retrieve information are oriented to GNU/Linux Distributions, so you could need to use others if you use s3fs on macOS or BSD_

#### Version of s3fs being used (s3fs --version)
_example: 1.00_
#### Version of s3fs being used (`s3fs --version`)
<!-- example: V1.91 (commit:b19262a) -->

#### Version of fuse being used (pkg-config --modversion fuse, rpm -qi fuse, dpkg -s fuse)
_example: 2.9.4_
#### Version of fuse being used (`pkg-config --modversion fuse`, `rpm -qi fuse` or `dpkg -s fuse`)
<!-- example: 2.9.2 -->

#### Kernel information (uname -r)
_command result: uname -r_
#### Kernel information (`uname -r`)
<!-- example: 5.10.96-90.460.amzn2.x86_64 -->

#### GNU/Linux Distribution, if applicable (cat /etc/os-release)
_command result: cat /etc/os-release_
#### GNU/Linux Distribution, if applicable (`cat /etc/os-release`)
<!-- command result -->

#### s3fs command line used, if applicable
#### How to run s3fs, if applicable
<!-- Describe the s3fs "command line" or "/etc/fstab" entry used. -->
[] command line
[] /etc/fstab

<!-- Executed command line or /etc/fastab entry -->
```
```
#### /etc/fstab entry, if applicable
```
```
#### s3fs syslog messages (grep s3fs /var/log/syslog, journalctl | grep s3fs, or s3fs outputs)
_if you execute s3fs with dbglevel, curldbg option, you can get detail debug messages_

#### s3fs syslog messages (`grep s3fs /var/log/syslog`, `journalctl | grep s3fs`, or `s3fs outputs`)
<!-- if you execute s3fs with dbglevel, curldbg option, you can get detail debug messages. -->
```
```

### Details about issue
<!-- Please describe the content of the issue in detail. -->
.github/PULL_REQUEST_TEMPLATE.md (10 changed lines)

@@ -1,5 +1,11 @@
<!-- --------------------------------------------------------------------------
Please describe the purpose of the pull request(such as resolving the issue)
and what the fix/update is.
--------------------------------------------------------------------------- -->

### Relevant Issue (if applicable)
_If there are Issues related to this PullRequest, please list it._
<!-- If there are Issues related to this PullRequest, please list it. -->

### Details
_Please describe the details of PullRequest._
<!-- Please describe the details of PullRequest. -->
.github/workflows/ci.yml (175 changed lines)

@@ -50,16 +50,19 @@ jobs:
#
matrix:
container:
- ubuntu:21.10
- ubuntu:23.10
- ubuntu:22.04
- ubuntu:20.04
- ubuntu:18.04
- debian:bookworm
- debian:bullseye
- debian:buster
- debian:stretch
- rockylinux:9
- rockylinux:8
- centos:centos7
- fedora:35
- fedora:39
- fedora:38
- opensuse/leap:15
- alpine:3.18

container:
image: ${{ matrix.container }}
@@ -80,8 +83,18 @@
run: |
if [ "${{ matrix.container }}" = "opensuse/leap:15" ]; then zypper install -y tar gzip; fi

- name: Checkout source code
uses: actions/checkout@v2
# [NOTE]
# actions/checkout@v3 uses nodejs v16 and will be deprecated.
# However, @v4 does not work on centos7 depending on the glibc version,
# so we will continue to use @v3.
#
- name: Checkout source code(other than centos7)
if: matrix.container != 'centos:centos7'
uses: actions/checkout@v4

- name: Checkout source code(only centos7)
if: matrix.container == 'centos:centos7'
uses: actions/checkout@v3

# [NOTE]
# Matters that depend on OS:VERSION are determined and executed in the following script.
@@ -97,79 +110,179 @@
/bin/sh -c "./configure ${CONFIGURE_OPTIONS}"
make --jobs=$(nproc)

- name: clang-tidy
run: |
# skip if clang-tidy does not exist, e.g., CentOS 7
if command -v clang-tidy; then
make -C src/ clang-tidy
make -C test/ clang-tidy
fi

- name: Cppcheck
run: |
# work around resource leak false positives on older Linux distributions
if cppcheck --version | awk '{if ($2 <= 1.86) { exit(1) } }'; then
# specify the version range to run cppcheck (cppcheck version number is x.y or x.y.z)
if cppcheck --version | sed -e 's/\./ /g' | awk '{if (($2 * 1000 + $3) <= 2004) { exit(1) } }'; then
make cppcheck
fi

- name: Shellcheck
run: |
make shellcheck
if shellcheck --version | awk -F '[ .]' '/version:/ && ($2 * 1000 + $3 <= 7) { exit(1) }'; then
make shellcheck
fi

- name: Test suite
run: |
make check -C src
make ALL_TESTS=1 check -C test || (test/filter-suite-log.sh test/test-suite.log; exit 1)

# [NOTE]
# A case of "runs-on: macos-11.0" does not work,
# because load_osxfuse returns exit code = 1.
# Maybe it needs to reboot. Apple said
# "Installing a new kernel extension requires signing in as an Admin user. You must also restart your Mac to load the extension".
# Then we do not use macos 11 on GitHub Actions now.
# Using macos-fuse-t
# This product(package) is a workaround for osxfuse which required an OS reboot(macos 11 and later).
# see. https://github.com/macos-fuse-t/fuse-t
# About osxfuse
# This job doesn't work with Github Actions using macOS 11+ because "load_osxfuse" returns
# "exit code = 1".(requires OS reboot)
#
macos10:
runs-on: macos-10.15
macos12:
runs-on: macos-12

steps:
- name: Checkout source code
uses: actions/checkout@v2
uses: actions/checkout@v4

- name: Brew tap
run: |
TAPS="$(brew --repository)/Library/Taps";
if [ -e "$TAPS/caskroom/homebrew-cask" ]; then rm -rf "$TAPS/caskroom/homebrew-cask"; fi;
HOMEBREW_NO_AUTO_UPDATE=1 brew tap homebrew/homebrew-cask
HOMEBREW_NO_AUTO_UPDATE=1 brew tap macos-fuse-t/homebrew-cask

- name: Install osxfuse
- name: Install fuse-t
run: |
HOMEBREW_NO_AUTO_UPDATE=1 brew install osxfuse
HOMEBREW_NO_AUTO_UPDATE=1 brew install fuse-t

- name: Install brew other packages
run: |
S3FS_BREW_PACKAGES='automake cppcheck python3 coreutils gnu-sed shellcheck';
S3FS_BREW_PACKAGES='automake cppcheck python3 coreutils gnu-sed shellcheck jq';
for s3fs_brew_pkg in ${S3FS_BREW_PACKAGES}; do if brew list | grep -q ${s3fs_brew_pkg}; then if brew outdated | grep -q ${s3fs_brew_pkg}; then HOMEBREW_NO_AUTO_UPDATE=1 brew upgrade ${s3fs_brew_pkg}; fi; else HOMEBREW_NO_AUTO_UPDATE=1 brew install ${s3fs_brew_pkg}; fi; done;

- name: Install awscli
- name: Install awscli2
run: |
if pip3 --version; then pip3 install awscli; else curl https://bootstrap.pypa.io/get-pip.py | sudo python; pip install awscli --ignore-installed matplotlib; fi

- name: Check osxfuse permission
run: |
if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then sudo chmod +s /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then sudo chmod +s /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse; else exit 1; fi
cd /tmp
curl "https://awscli.amazonaws.com/AWSCLIV2.pkg" -o "AWSCLIV2.pkg"
sudo installer -pkg AWSCLIV2.pkg -target /

- name: Build
run: |
./autogen.sh
PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig ./configure CXXFLAGS='-std=c++03 -DS3FS_PTHREAD_ERRORCHECK=1'
PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig ./configure CXXFLAGS='-std=c++11 -DS3FS_PTHREAD_ERRORCHECK=1'
make --jobs=$(sysctl -n hw.ncpu)

- name: Cppcheck
run: |
make cppcheck
# specify the version range to run cppcheck (cppcheck version number is x.y or x.y.z)
if cppcheck --version | sed -e 's/\./ /g' | awk '{if (($2 * 1000 + $3) <= 2004) { exit(1) } }'; then
make cppcheck
fi

- name: Shellcheck
run: |
make shellcheck
if shellcheck --version | awk -F '[ .]' '/version:/ && ($2 * 1000 + $3 <= 7) { exit(1) }'; then
make shellcheck
fi

- name: Test suite
run: |
make check -C src
echo "user_allow_other" | sudo tee -a /etc/fuse.conf >/dev/null
if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse; else exit 1; fi
make ALL_TESTS=1 check -C test || (test/filter-suite-log.sh test/test-suite.log; exit 1)

MemoryTest:
runs-on: ubuntu-latest

#
# build matrix for containers
#
strategy:
#
# do not stop jobs automatically if any of the jobs fail
#
fail-fast: false

#
# matrix for type of checking
#
# [NOTE]
# Currently following test is not supported:
# - sanitize_memory : Future support planned
#
matrix:
checktype:
- glibc_debug
- sanitize_address
- sanitize_others
- sanitize_thread
- valgrind

container:
image: fedora:39

options: "--privileged --cap-add SYS_ADMIN --device /dev/fuse"

steps:
- name: Checkout source code
uses: actions/checkout@v4

- name: Install packages
run: |
.github/workflows/linux-ci-helper.sh fedora:39

- name: Install clang
run: |
dnf install -y clang
if [ "${{ matrix.checktype }}" = "valgrind" ]; then
dnf install -y valgrind
fi

#
# Set CXX/CXXFLAGS and Variables for test
#
- name: Set variables
run: |
COMMON_CXXFLAGS='-g -Wno-cpp -DS3FS_PTHREAD_ERRORCHECK=1'
if [ "${{ matrix.checktype }}" = "glibc_debug" ]; then
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -D_GLIBCXX_DEBUG" >> $GITHUB_ENV
elif [ "${{ matrix.checktype }}" = "sanitize_address" ]; then
echo 'CXX=clang++' >> $GITHUB_ENV
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -fsanitize=address -fsanitize-address-use-after-scope" >> $GITHUB_ENV
echo 'ASAN_OPTIONS=detect_leaks=1,detect_stack_use_after_return=1' >> $GITHUB_ENV
elif [ "${{ matrix.checktype }}" = "sanitize_memory" ]; then
echo 'CXX=clang++' >> $GITHUB_ENV
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -fsanitize=memory" >> $GITHUB_ENV
elif [ "${{ matrix.checktype }}" = "sanitize_thread" ]; then
echo 'CXX=clang++' >> $GITHUB_ENV
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -fsanitize=thread" >> $GITHUB_ENV
echo 'TSAN_OPTIONS=halt_on_error=1' >> $GITHUB_ENV
elif [ "${{ matrix.checktype }}" = "sanitize_others" ]; then
echo 'CXX=clang++' >> $GITHUB_ENV
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O1 -fsanitize=undefined,implicit-conversion,local-bounds,unsigned-integer-overflow" >> $GITHUB_ENV
elif [ "${{ matrix.checktype }}" = "valgrind" ]; then
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O1" >> $GITHUB_ENV
echo 'VALGRIND=--leak-check=full' >> $GITHUB_ENV
echo 'RETRIES=100' >> $GITHUB_ENV
echo 'S3_URL=http://127.0.0.1:8081' >> $GITHUB_ENV
fi

- name: Build
run: |
./autogen.sh
/bin/sh -c "CXX=${CXX} CXXFLAGS=\"${CXXFLAGS}\" ./configure --prefix=/usr --with-openssl"
make

- name: Test suite
run: |
/bin/sh -c "ALL_TESTS=1 ASAN_OPTIONS=${ASAN_OPTIONS} TSAN_OPTIONS=${TSAN_OPTIONS} VALGRIND=${VALGRIND} RETRIES=${RETRIES} make check -C test || (test/filter-suite-log.sh test/test-suite.log; exit 1)"

#
# Local variables:
# tab-width: 4
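The MemoryTest job above can be approximated outside GitHub Actions. A sketch that mirrors the sanitize_address leg of the matrix, with flags copied from the workflow and assuming clang, FUSE, and the test dependencies are already installed locally:

```sh
# Local approximation of the sanitize_address matrix entry above.
./autogen.sh
CXX=clang++ CXXFLAGS='-g -Wno-cpp -DS3FS_PTHREAD_ERRORCHECK=1 -O0 -fsanitize=address -fsanitize-address-use-after-scope' \
    ./configure --prefix=/usr --with-openssl
make
ALL_TESTS=1 ASAN_OPTIONS=detect_leaks=1,detect_stack_use_after_return=1 \
    make check -C test || (test/filter-suite-log.sh test/test-suite.log; exit 1)
```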
.github/workflows/linux-ci-helper.sh (183 changed lines)

@@ -19,13 +19,25 @@
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#

echo "${PRGNAME} [INFO] Start Linux helper for installing packages."
# [NOTE]
# Since bash is not present in some Runner containers, this script
# runs in sh.
# pipefail etc. are not native variables of sh. It exists in bash's
# sh compatibility mode, but doesn't work in sh compatibility mode
# of ash such as alpine.
# However, it's not fatal that pipefail doesn't work for this script.
#
set -o errexit
set -o nounset
#set -o pipefail

#-----------------------------------------------------------
# Common variables
#-----------------------------------------------------------
PRGNAME=$(basename "$0")

echo "${PRGNAME} [INFO] Start Linux helper for installing packages."

#-----------------------------------------------------------
# Parameter check
#-----------------------------------------------------------
@@ -46,119 +58,169 @@ CONTAINER_OSNAME=$(echo "${CONTAINER_FULLNAME}" | sed 's/:/ /g' | awk '{print $1
CONTAINER_OSVERSION=$(echo "${CONTAINER_FULLNAME}" | sed 's/:/ /g' | awk '{print $2}')

#-----------------------------------------------------------
# Common variables for pip
# Common variables for awscli2
#-----------------------------------------------------------
PIP_BIN="pip3"
PIP_OPTIONS="--upgrade"
INSTALL_AWSCLI_PACKAGES="awscli"
AWSCLI_URI="https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip"
AWSCLI_ZIP_FILE="awscliv2.zip"

#-----------------------------------------------------------
# Parameters for configure(set environments)
#-----------------------------------------------------------
# shellcheck disable=SC2089
CONFIGURE_OPTIONS="CXXFLAGS='-O -std=c++03 -DS3FS_PTHREAD_ERRORCHECK=1' --prefix=/usr --with-openssl"
CXXFLAGS="-O -DS3FS_PTHREAD_ERRORCHECK=1"
CONFIGURE_OPTIONS="--prefix=/usr --with-openssl"

#-----------------------------------------------------------
# OS dependent variables
#-----------------------------------------------------------
if [ "${CONTAINER_FULLNAME}" = "ubuntu:21.10" ]; then
#
# Default values
#
PACKAGE_ENABLE_REPO_OPTIONS=""
PACKAGE_INSTALL_ADDITIONAL_OPTIONS=""
SHELLCHECK_DIRECT_INSTALL=0
AWSCLI_DIRECT_INSTALL=1

if [ "${CONTAINER_FULLNAME}" = "ubuntu:23.10" ]; then
PACKAGE_MANAGER_BIN="apt-get"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"

INSTALL_PACKAGES="autoconf autotools-dev default-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip"
INSTALL_PACKAGES="autoconf autotools-dev clang-tidy openjdk-21-jre-headless fuse jq libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""

elif [ "${CONTAINER_FULLNAME}" = "ubuntu:22.04" ]; then
PACKAGE_MANAGER_BIN="apt-get"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"

INSTALL_PACKAGES="autoconf autotools-dev clang-tidy openjdk-17-jre-headless fuse jq libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""

elif [ "${CONTAINER_FULLNAME}" = "ubuntu:20.04" ]; then
PACKAGE_MANAGER_BIN="apt-get"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"

INSTALL_PACKAGES="autoconf autotools-dev default-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip"
INSTALL_PACKAGES="autoconf autotools-dev clang-tidy openjdk-17-jre-headless fuse jq libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""

elif [ "${CONTAINER_FULLNAME}" = "ubuntu:18.04" ]; then
elif [ "${CONTAINER_FULLNAME}" = "debian:bookworm" ]; then
PACKAGE_MANAGER_BIN="apt-get"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"

INSTALL_PACKAGES="autoconf autotools-dev default-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""

elif [ "${CONTAINER_FULLNAME}" = "ubuntu:16.04" ]; then
PACKAGE_MANAGER_BIN="apt-get"
PACKAGE_UPDATE_OPTIONS="update -y -qq"

INSTALL_PACKAGES="autoconf autotools-dev default-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl python3-pip"
INSTALL_PACKAGES="autoconf autotools-dev clang-tidy openjdk-17-jre-headless fuse jq libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl procps python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""

elif [ "${CONTAINER_FULLNAME}" = "debian:bullseye" ]; then
PACKAGE_MANAGER_BIN="apt-get"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"

INSTALL_PACKAGES="autoconf autotools-dev default-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl procps python3-pip"
INSTALL_PACKAGES="autoconf autotools-dev clang-tidy openjdk-17-jre-headless fuse jq libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl procps python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""

elif [ "${CONTAINER_FULLNAME}" = "debian:buster" ]; then
PACKAGE_MANAGER_BIN="apt-get"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"

INSTALL_PACKAGES="autoconf autotools-dev default-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl procps python3-pip"
INSTALL_PACKAGES="autoconf autotools-dev clang-tidy default-jre-headless fuse jq libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl procps python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""

elif [ "${CONTAINER_FULLNAME}" = "debian:stretch" ]; then
PACKAGE_MANAGER_BIN="apt-get"
elif [ "${CONTAINER_FULLNAME}" = "rockylinux:9" ]; then
PACKAGE_MANAGER_BIN="dnf"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"
PACKAGE_ENABLE_REPO_OPTIONS="--enablerepo=crb"

INSTALL_PACKAGES="autoconf autotools-dev default-jre-headless fuse libfuse-dev libcurl4-openssl-dev libxml2-dev locales-all mime-support libtool pkg-config libssl-dev attr curl procps python3-pip"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""
# [NOTE]
# Rocky Linux 9 (or CentOS Stream 9) images may have curl installation issues that
# conflict with the curl-minimal package.
#
PACKAGE_INSTALL_ADDITIONAL_OPTIONS="--allowerasing"

INSTALL_PACKAGES="clang-tools-extra curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-17-openjdk-headless jq libxml2-devel mailcap git automake make openssl openssl-devel attr diffutils curl python3 procps unzip xz https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm"
INSTALL_CHECKER_PKGS="cppcheck"
INSTALL_CHECKER_PKG_OPTIONS="--enablerepo=epel"

# [NOTE]
# For RockyLinux, ShellCheck is downloaded from the github archive and installed.
#
SHELLCHECK_DIRECT_INSTALL=1

elif [ "${CONTAINER_FULLNAME}" = "rockylinux:8" ]; then
PACKAGE_MANAGER_BIN="dnf"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"

# [NOTE]
# Installing ShellCheck on Rocky Linux is not easy.
# Give up to run ShellCheck on Rocky Linux as we don't have to run ShellChek on all operating systems.
#
INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-11-openjdk-headless libxml2-devel mailcap git automake make openssl-devel attr diffutils curl python3"
INSTALL_PACKAGES="clang-tools-extra curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-17-openjdk-headless jq libxml2-devel mailcap git automake make openssl openssl-devel attr diffutils curl python3 unzip"
INSTALL_CHECKER_PKGS="cppcheck"
INSTALL_CHECKER_PKG_OPTIONS="--enablerepo=powertools"

# [NOTE]
# For RockyLinux, ShellCheck is downloaded from the github archive and installed.
#
SHELLCHECK_DIRECT_INSTALL=1

elif [ "${CONTAINER_FULLNAME}" = "centos:centos7" ]; then
PACKAGE_MANAGER_BIN="yum"
PACKAGE_UPDATE_OPTIONS="update -y"
PACKAGE_INSTALL_OPTIONS="install -y"

# [NOTE]
# ShellCheck version(0.3.8) is too low to check.
# And in this version, it cannot be passed due to following error.
# "shellcheck: ./test/integration-test-main.sh: hGetContents: invalid argument (invalid byte sequence)"
#
INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-11-openjdk-headless libxml2-devel mailcap git automake make openssl-devel attr curl python3 epel-release"
INSTALL_CHECKER_PKGS="cppcheck"
INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel llvm-toolset-7-clang-tools-extra gcc-c++ glibc-langpack-en java-11-openjdk-headless libxml2-devel mailcap git automake make openssl openssl-devel attr curl python3 epel-release unzip"
INSTALL_CHECKER_PKGS="cppcheck jq"
INSTALL_CHECKER_PKG_OPTIONS="--enablerepo=epel"

elif [ "${CONTAINER_FULLNAME}" = "fedora:35" ]; then
elif [ "${CONTAINER_FULLNAME}" = "fedora:39" ]; then
PACKAGE_MANAGER_BIN="dnf"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"

# TODO: Cannot use java-latest-openjdk (17) due to modules issue in S3Proxy/jclouds/Guice
INSTALL_PACKAGES="curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-11-openjdk-headless libxml2-devel mailcap git automake make openssl-devel curl attr diffutils procps python3-pip"
INSTALL_PACKAGES="clang-tools-extra curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-latest-openjdk-headless jq libxml2-devel mailcap git automake make openssl openssl-devel curl attr diffutils procps python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck ShellCheck"
INSTALL_CHECKER_PKG_OPTIONS=""

elif [ "${CONTAINER_FULLNAME}" = "fedora:38" ]; then
PACKAGE_MANAGER_BIN="dnf"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
PACKAGE_INSTALL_OPTIONS="install -y"

INSTALL_PACKAGES="clang-tools-extra curl-devel fuse fuse-devel gcc libstdc++-devel gcc-c++ glibc-langpack-en java-latest-openjdk-headless jq libxml2-devel mailcap git automake make openssl openssl-devel curl attr diffutils procps python3-pip unzip"
INSTALL_CHECKER_PKGS="cppcheck ShellCheck"
INSTALL_CHECKER_PKG_OPTIONS=""

elif [ "${CONTAINER_FULLNAME}" = "opensuse/leap:15" ]; then
PACKAGE_MANAGER_BIN="zypper"
PACKAGE_UPDATE_OPTIONS="refresh"
PACKAGE_INSTALL_OPTIONS="install -y"

INSTALL_PACKAGES="automake curl-devel fuse fuse-devel gcc-c++ java-11-openjdk-headless libxml2-devel make openssl-devel python3-pip curl attr ShellCheck"
INSTALL_PACKAGES="automake clang-tools curl-devel fuse fuse-devel gcc-c++ java-17-openjdk-headless jq libxml2-devel make openssl openssl-devel python3-pip curl attr ShellCheck unzip"
INSTALL_CHECKER_PKGS="cppcheck ShellCheck"
INSTALL_CHECKER_PKG_OPTIONS=""

elif [ "${CONTAINER_FULLNAME}" = "alpine:3.18" ]; then
PACKAGE_MANAGER_BIN="apk"
PACKAGE_UPDATE_OPTIONS="update --no-progress"
PACKAGE_INSTALL_OPTIONS="add --no-progress --no-cache"

INSTALL_PACKAGES="bash clang-extra-tools curl g++ make automake autoconf libtool git curl-dev fuse-dev jq libxml2-dev openssl coreutils procps attr sed mailcap openjdk17 aws-cli"
INSTALL_CHECKER_PKGS="cppcheck shellcheck"
INSTALL_CHECKER_PKG_OPTIONS=""

AWSCLI_DIRECT_INSTALL=0

else
echo "No container configured for: ${CONTAINER_FULLNAME}"
exit 1
@@ -177,10 +239,32 @@ echo "${PRGNAME} [INFO] Updates."
# Install packages ( with cppcheck )
#
echo "${PRGNAME} [INFO] Install packages."
/bin/sh -c "${PACKAGE_MANAGER_BIN} install -y ${INSTALL_PACKAGES}"
/bin/sh -c "${PACKAGE_MANAGER_BIN} ${PACKAGE_ENABLE_REPO_OPTIONS} ${PACKAGE_INSTALL_OPTIONS} ${PACKAGE_INSTALL_ADDITIONAL_OPTIONS} ${INSTALL_PACKAGES}"

echo "${PRGNAME} [INFO] Install cppcheck package."
/bin/sh -c "${PACKAGE_MANAGER_BIN} ${INSTALL_CHECKER_PKG_OPTIONS} install -y ${INSTALL_CHECKER_PKGS}"
/bin/sh -c "${PACKAGE_MANAGER_BIN} ${INSTALL_CHECKER_PKG_OPTIONS} ${PACKAGE_INSTALL_OPTIONS} ${INSTALL_CHECKER_PKGS}"

#
# Install ShellCheck manually
#
if [ "${SHELLCHECK_DIRECT_INSTALL}" -eq 1 ]; then
echo "${PRGNAME} [INFO] Install shellcheck package from github archive."

if ! LATEST_SHELLCHECK_DOWNLOAD_URL=$(curl --silent --show-error https://api.github.com/repos/koalaman/shellcheck/releases/latest | jq -r '.assets[].browser_download_url | select(contains("linux.x86_64"))'); then
echo "Could not get shellcheck package url"
exit 1
fi
if ! curl -s -S -L -o /tmp/shellcheck.tar.xz "${LATEST_SHELLCHECK_DOWNLOAD_URL}"; then
echo "Failed to download shellcheck package from ${LATEST_SHELLCHECK_DOWNLOAD_URL}"
exit 1
fi
if ! tar -C /usr/bin/ -xf /tmp/shellcheck.tar.xz --no-anchored 'shellcheck' --strip=1; then
echo "Failed to extract and install shellcheck."
rm -f /tmp/shellcheck.tar.xz
exit 1
fi
rm -f /tmp/shellcheck.tar.xz
fi

# Check Java version
java -version
@@ -188,17 +272,26 @@ java -version
#
# Install awscli
#
echo "${PRGNAME} [INFO] Install awscli package."
/bin/sh -c "${PIP_BIN} install ${PIP_OPTIONS} ${INSTALL_AWSCLI_PACKAGES}"
/bin/sh -c "${PIP_BIN} install ${PIP_OPTIONS} rsa"
if [ "${AWSCLI_DIRECT_INSTALL}" -eq 1 ]; then
echo "${PRGNAME} [INFO] Install awscli2 package."

CURRENT_DIR=$(pwd)
cd /tmp || exit 1

curl "${AWSCLI_URI}" -o "${AWSCLI_ZIP_FILE}"
unzip "${AWSCLI_ZIP_FILE}"
./aws/install

cd "${CURRENT_DIR}" || exit 1
fi

#-----------------------------------------------------------
# Set environment for configure
#-----------------------------------------------------------
echo "${PRGNAME} [INFO] Set environment for configure options"

# shellcheck disable=SC2090
export CONFIGURE_OPTIONS
echo "CXXFLAGS=${CXXFLAGS}" >> "${GITHUB_ENV}"
echo "CONFIGURE_OPTIONS=${CONFIGURE_OPTIONS}" >> "${GITHUB_ENV}"

echo "${PRGNAME} [INFO] Finish Linux helper for installing packages."
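Taken together with the workflow above, this helper script is what each Linux container job runs first. A condensed sketch of the sequence, using the same commands shown in this comparison (the container name is just an example):

```sh
# Install distro packages and export CXXFLAGS / CONFIGURE_OPTIONS for the later build steps.
.github/workflows/linux-ci-helper.sh fedora:39
./autogen.sh
/bin/sh -c "./configure ${CONFIGURE_OPTIONS}"
make --jobs=$(nproc)
```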
.gitignore (4 changed lines)

@@ -42,6 +42,7 @@ config.status
config.sub
configure
configure.scan
configure.ac~
depcomp
install-sh
libtool
@@ -85,6 +86,9 @@ test/chaos-http-proxy-*
test/junk_data
test/s3proxy-*
test/write_multiblock
test/mknod_test
test/truncate_read_file
test/cr_filename

#
# Windows ports
ChangeLog (30 changed lines)

@@ -1,6 +1,36 @@
ChangeLog for S3FS
------------------

Version 1.94 -- 23 Feb, 2024 (major changes only)
#2409 - Fixed a bug that mounting with ksmid specified to fail
#2404 - Fixed ordering problem between fdatasync and flush
#2399 - Fixed ListBucket/IAM edge cases
#2376 - Corrected list_bucket to search in stat cache during creating new file
#2369 - Make dir size 4096 not 0
#2351 - Added option free_space_ratio to control cache size
#2325 - Fixed a bug upload boundary calculation in StreamUpload
#2298 - Abort MPU when MPU fails to avoid litter
#2261 - Use explicit ownership for memory
#2179 - Require C++11

Version 1.93 -- 19 Jul, 2023 (major changes only)
#2212 - Allow listing implicit directories
#2194 - #2209 - #2211 - #2214 - #2215 - Fix thread safety issues
#2191 - #2201 - Add support for FUSE-T on macOS

Version 1.92 -- 21 May, 2023 (major changes only)
#1802 - #2104 - New option: streamupload
#1922 - Enable noobj_cache by default
#1927 - #2101 - New option: credlib and credlib_ops
#1957 - Fixed a bug that regular files could not be created by mknod
#1964 - Added stat information to the mount point
#1970 - #1986 - Enable notsup_compat_dir by default
#2000 - #2001 - Set mtime/ctime/atime of all objects as nanosecond
#2065 - Compatible with OpenSSL 3.0
#2075 - Added proxy and proxy_cred_file option
#2135 - Changed to rename cache files when renaming large files
#2148 - New option: bucket_size

Version 1.91 -- 07 Mar, 2022 (major changes only)
#1753 - Fix RowFlush can not upload last part smaller than 5MB using NoCacheMultipartPost
#1760 - Fix IAM role retrieval from IMDSv2
@@ -34,7 +34,7 @@ release : dist ../utils/release.sh
cppcheck:
cppcheck --quiet --error-exitcode=1 \
--inline-suppr \
--std=c++03 \
--std=c++11 \
--xml \
-D HAVE_ATTR_XATTR_H \
-D HAVE_SYS_EXTATTR_H \
@@ -44,6 +44,8 @@ cppcheck:
--enable=warning,style,information,missingInclude \
--suppress=missingIncludeSystem \
--suppress=unmatchedSuppression \
--suppress=useStlAlgorithm \
--suppress=checkLevelNormal \
src/ test/

#
@@ -80,6 +82,6 @@ shellcheck:
# tab-width: 4
# c-basic-offset: 4
# End:
# vim600: expandtab sw=4 ts= fdm=marker
# vim600: expandtab sw=4 ts=4 fdm=marker
# vim<600: expandtab sw=4 ts=4
#
README.md (19 changed lines)

@@ -1,8 +1,9 @@
# s3fs

s3fs allows Linux, macOS, and FreeBSD to mount an S3 bucket via FUSE.
s3fs preserves the native object format for files, allowing use of other
tools like [AWS CLI](https://github.com/aws/aws-cli).
s3fs allows Linux, macOS, and FreeBSD to mount an S3 bucket via [FUSE(Filesystem in Userspace)](https://github.com/libfuse/libfuse).
s3fs makes you operate files and directories in S3 bucket like a local file system.
s3fs preserves the native object format for files, allowing use of other tools like [AWS CLI](https://github.com/aws/aws-cli).

[](https://github.com/s3fs-fuse/s3fs-fuse/actions)
[](https://twitter.com/s3fsfuse)

@@ -71,7 +72,7 @@ Many systems provide pre-built packages:
* macOS 10.12 and newer via [Homebrew](https://brew.sh/):

```
brew install --cask osxfuse
brew install --cask macfuse
brew install gromgit/fuse/s3fs-mac
```

@@ -81,8 +82,9 @@ Many systems provide pre-built packages:
pkg install fusefs-s3fs
```

Note: Homebrew has deprecated osxfuse and s3fs may not install any more, see
[#1618](https://github.com/s3fs-fuse/s3fs-fuse/issues/1618).
* Windows:

Windows has its own install, seening in [this link](COMPILATION.md)

Otherwise consult the [compilation instructions](COMPILATION.md).

@@ -91,6 +93,8 @@ Otherwise consult the [compilation instructions](COMPILATION.md).
s3fs supports the standard
[AWS credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-config-files.html)
stored in `${HOME}/.aws/credentials`. Alternatively, s3fs supports a custom passwd file.
Finally s3fs recognizes the `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_SESSION_TOKEN`
environment variables.

The default location for the s3fs password file can be created:
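The line above introduces the passwd-file setup; a minimal sketch of the conventional s3fs form follows, with placeholder credentials that are not part of the diff shown here:

```sh
# Create the default s3fs password file with placeholder credentials,
# then restrict its permissions as s3fs requires.
echo ACCESS_KEY_ID:SECRET_ACCESS_KEY > "${HOME}/.passwd-s3fs"
chmod 600 "${HOME}/.passwd-s3fs"
```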
@@ -158,6 +162,8 @@ Generally S3 cannot offer the same performance or semantics as a local file syst

## References

* [CSI for S3](https://github.com/ctrox/csi-s3) - Kubernetes CSI driver
* [docker-s3fs-client](https://github.com/efrecon/docker-s3fs-client) - Docker image containing s3fs
* [goofys](https://github.com/kahing/goofys) - similar to s3fs but has better performance and less POSIX compatibility
* [s3backer](https://github.com/archiecobbs/s3backer) - mount an S3 bucket as a single file
* [S3Proxy](https://github.com/gaul/s3proxy) - combine with s3fs to mount Backblaze B2, EMC Atmos, Microsoft Azure, and OpenStack Swift buckets
@@ -175,4 +181,3 @@ Generally S3 cannot offer the same performance or semantics as a local file syst
Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>

Licensed under the GNU GPL version 2
@@ -33,7 +33,8 @@ echo "--- Finished commit hash file ---"

echo "--- Start autotools -------------"

aclocal \
autoupdate \
&& aclocal \
&& autoheader \
&& automake --add-missing \
&& autoconf
@@ -47,6 +48,6 @@ exit 0
# tab-width: 4
# c-basic-offset: 4
# End:
# vim600: expandtab sw=4 ts= fdm=marker
# vim600: expandtab sw=4 ts=4 fdm=marker
# vim<600: expandtab sw=4 ts=4
#
configure.ac (57 changed lines)

@@ -19,11 +19,11 @@
######################################################################
dnl Process this file with autoconf to produce a configure script.

AC_PREREQ(2.59)
AC_INIT(s3fs, 1.91)
AC_PREREQ([2.69])
AC_INIT([s3fs],[1.94])
AC_CONFIG_HEADER([config.h])

AC_CANONICAL_SYSTEM
AC_CANONICAL_TARGET
AM_INIT_AUTOMAKE([foreign])

AC_PROG_CXX
@@ -34,7 +34,7 @@ AC_CHECK_HEADERS([attr/xattr.h])
AC_CHECK_HEADERS([sys/extattr.h])
AC_CHECK_FUNCS([fallocate])

CXXFLAGS="$CXXFLAGS -Wall -fno-exceptions -D_FILE_OFFSET_BITS=64 -D_FORTIFY_SOURCE=2"
CXXFLAGS="-Wall -fno-exceptions -D_FILE_OFFSET_BITS=64 -D_FORTIFY_SOURCE=3 -std=c++11 $CXXFLAGS"

dnl ----------------------------------------------
dnl For macOS
@@ -48,6 +48,7 @@ case "$target" in
*-darwin* )
# Do something specific for mac
min_fuse_version=2.7.3
min_fuse_t_version=1.0.20
;;
*)
# Default Case
@@ -56,11 +57,24 @@
;;
esac

dnl ----------------------------------------------
dnl Checking the FUSE library
dnl ----------------------------------------------
dnl Distinguish between Linux (libfuse) and macOS (FUSE-T).
dnl
found_fuse_t=no
PKG_CHECK_MODULES([FUSE_T], [fuse-t >= ${min_fuse_t_version}], [found_fuse_t=yes], [found_fuse_t=no])

AS_IF([test "$found_fuse_t" = "yes"],
[PKG_CHECK_MODULES([fuse_library_checking], [fuse-t >= ${min_fuse_t_version}])],
[PKG_CHECK_MODULES([fuse_library_checking], [fuse >= ${min_fuse_version}])])

dnl ----------------------------------------------
dnl Choice SSL library
dnl ----------------------------------------------
auth_lib=na
nettle_lib=no
use_openssl_30=no

dnl
dnl nettle library
@@ -180,15 +194,24 @@ AS_IF(

dnl
dnl For PKG_CONFIG before checking nss/gnutls.
dnl this is redundant checking, but we need checking before following.
dnl
PKG_CHECK_MODULES([common_lib_checking], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 ])

AC_MSG_CHECKING([compile s3fs with])
case "${auth_lib}" in
openssl)
AC_MSG_RESULT(OpenSSL)
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])
AS_IF([test "$found_fuse_t" = "yes"],
[PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])],
[PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])])

AC_MSG_CHECKING([openssl 3.0 or later])
AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM([[#include <openssl/opensslv.h>
#if !defined(LIBRESSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x30000000L
#error "found openssl is 3.0 or later(so compiling is stopped with error)"
#endif]], [[]])],
[AC_MSG_RESULT(no)],
[AC_MSG_RESULT(yes); use_openssl_30=yes])
;;
gnutls)
AC_MSG_RESULT(GnuTLS-gcrypt)
@@ -197,7 +220,9 @@ gnutls)
AS_IF([test "$gnutls_nettle" = ""], [AC_CHECK_LIB(gcrypt, gcry_control, [gnutls_nettle=0])])
AS_IF([test $gnutls_nettle = 0],
[
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])
AS_IF([test "$found_fuse_t" = "yes"],
[PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])],
[PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])])
LIBS="-lgnutls -lgcrypt $LIBS"
AC_MSG_CHECKING([gnutls is build with])
AC_MSG_RESULT(gcrypt)
@@ -211,7 +236,9 @@ nettle)
AS_IF([test "$gnutls_nettle" = ""], [AC_CHECK_LIB(nettle, nettle_MD5Init, [gnutls_nettle=1])])
AS_IF([test $gnutls_nettle = 1],
[
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])
AS_IF([test "$found_fuse_t" = "yes"],
[PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])],
[PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])])
LIBS="-lgnutls -lnettle $LIBS"
AC_MSG_CHECKING([gnutls is build with])
AC_MSG_RESULT(nettle)
@@ -220,7 +247,9 @@ nettle)
;;
nss)
AC_MSG_RESULT(NSS)
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])
AS_IF([test "$found_fuse_t" = "yes"],
[PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])],
[PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])])
;;
*)
AC_MSG_ERROR([unknown ssl library type.])
@@ -228,6 +257,7 @@ nss)
esac

AM_CONDITIONAL([USE_SSL_OPENSSL], [test "$auth_lib" = openssl])
AM_CONDITIONAL([USE_SSL_OPENSSL_30], [test "$use_openssl_30" = yes])
AM_CONDITIONAL([USE_SSL_GNUTLS], [test "$auth_lib" = gnutls -o "$auth_lib" = nettle])
AM_CONDITIONAL([USE_GNUTLS_NETTLE], [test "$auth_lib" = nettle])
AM_CONDITIONAL([USE_SSL_NSS], [test "$auth_lib" = nss])
@@ -310,10 +340,15 @@ AC_COMPILE_IFELSE(
]
)

dnl ----------------------------------------------
dnl dl library
dnl ----------------------------------------------
AC_CHECK_LIB([dl], [dlopen, dlclose, dlerror, dlsym], [], [AC_MSG_ERROR([Could not found dlopen, dlclose, dlerror and dlsym])])

dnl ----------------------------------------------
dnl build date
dnl ----------------------------------------------
AC_SUBST([MAN_PAGE_DATE], [$(date +"%B %Y")])
AC_SUBST([MAN_PAGE_DATE], [$(date -r doc/man/s3fs.1.in +"%B %Y")])

dnl ----------------------------------------------
dnl output files
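For macOS builds against FUSE-T, the macos12 job earlier in this comparison shows how configure is invoked once the fuse-t package is installed; a sketch using the same commands, with Homebrew paths assumed:

```sh
# Mirrors the macOS CI build step above; pkg-config resolves fuse-t via configure.ac's checks.
PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig \
    ./configure CXXFLAGS='-std=c++11 -DS3FS_PTHREAD_ERRORCHECK=1'
make --jobs=$(sysctl -n hw.ncpu)
```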
@@ -85,7 +85,7 @@ delete local file cache when s3fs starts and exits.
.TP
\fB\-o\fR storage_class (default="standard")
store object with specified storage class.
Possible values: standard, standard_ia, onezone_ia, reduced_redundancy, intelligent_tiering, glacier, and deep_archive.
Possible values: standard, standard_ia, onezone_ia, reduced_redundancy, intelligent_tiering, glacier, glacier_ir, and deep_archive.
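As a hedged illustration of the option just described (bucket name and mount point are placeholders, not taken from the diff):

```sh
# Illustrative only: write new objects using the glacier_ir storage class.
s3fs mybucket /mnt/s3 -o storage_class=glacier_ir
```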
.TP
\fB\-o\fR use_rrs (default is disable)
use Amazon's Reduced Redundancy Storage.
@@ -110,6 +110,7 @@ You can use "k" for short "kmsid".
If you san specify SSE-KMS type with your <kms id> in AWS KMS, you can set it after "kmsid:" (or "k:").
If you specify only "kmsid" ("k"), you need to set AWSSSEKMSID environment which value is <kms id>.
You must be careful about that you can not use the KMS id which is not same EC2 region.
Additionally, if you specify SSE-KMS, your endpoints must use Secure Sockets Layer(SSL) or Transport Layer Security(TLS).
.TP
\fB\-o\fR load_sse_c - specify SSE-C keys
Specify the custom-provided encryption keys file path for decrypting at downloading.
@@ -167,11 +168,10 @@ specify expire time (seconds) for entries in the stat cache and symbolic link ca
specify expire time (seconds) for entries in the stat cache and symbolic link cache. This expire time is based on the time from the last access time of those cache.
This option is exclusive with stat_cache_expire, and is left for compatibility with older versions.
.TP
\fB\-o\fR enable_noobj_cache (default is disable)
enable cache entries for the object which does not exist.
s3fs always has to check whether file (or sub directory) exists under object (path) when s3fs does some command, since s3fs has recognized a directory which does not exist and has files or sub directories under itself.
It increases ListBucket request and makes performance bad.
You can specify this option for performance, s3fs memorizes in stat cache that the object (file or directory) does not exist.
\fB\-o\fR disable_noobj_cache (default is enable)
By default s3fs memorizes when an object does not exist up until the stat cache timeout.
This caching can cause staleness for applications.
If disabled, s3fs will not memorize objects and may cause extra HeadObject requests and reduce performance.
.TP
\fB\-o\fR no_check_certificate (by default this option is disabled)
server certificate won't be checked against the available certificate authorities.
@@ -190,7 +190,7 @@ maximum number of parallel request for listing objects.
.TP
\fB\-o\fR parallel_count (default="5")
number of parallel request for uploading big objects.
s3fs uploads large object (over 20MB) by multipart post request, and sends parallel requests.
s3fs uploads large object (over 25MB by default) by multipart post request, and sends parallel requests.
This option limits parallel request count which s3fs requests at once.
It is necessary to set this value depending on a CPU and a network band.
.TP
@@ -210,11 +210,30 @@ Flush dirty data to S3 after a certain number of MB written.
The minimum value is 50 MB. -1 value means disable.
Cannot be used with nomixupload.
.TP
\fB\-o\fR bucket_size (default=maximum long unsigned integer value)
The size of the bucket with which the corresponding
elements of the statvfs structure will be filled. The option
argument is an integer optionally followed by a
multiplicative suffix (GB, GiB, TB, TiB, PB, PiB,
EB, EiB) (no spaces in between). If no suffix is supplied,
bytes are assumed; eg: 20000000, 30GB, 45TiB. Note that
s3fs does not compute the actual volume size (too
expensive): by default it will assume the maximum possible
size; however, since this may confuse other software which
uses s3fs, the advertised bucket size can be set with this
option.
.TP
\fB\-o\fR ensure_diskfree (default 0)
sets MB to ensure disk free space. This option means the threshold of free space size on disk which is used for the cache file by s3fs.
s3fs makes file for downloading, uploading and caching files.
If the disk free space is smaller than this value, s3fs do not use disk space as possible in exchange for the performance.
.TP
\fB\-o\fR free_space_ratio (default="10")
sets min free space ratio of the disk. The value of this option can be between 0 and 100. It will control
the size of the cache according to this ratio to ensure that the idle ratio of the disk is greater than this value.
For example, when the disk space is 50GB, the default value will
ensure that the disk will reserve at least 50GB * 10%% = 5GB of remaining space.
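A hedged example combining the size- and cache-related options above (bucket name, mount point, and cache directory are placeholders; use_cache is s3fs's existing cache-directory option):

```sh
# Illustrative only: advertise a 30GB volume and keep at least 20% of the cache disk free.
s3fs mybucket /mnt/s3 -o use_cache=/var/cache/s3fs -o bucket_size=30GB -o free_space_ratio=20
```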
.TP
|
||||
\fB\-o\fR multipart_threshold (default="25")
|
||||
threshold, in MB, to use multipart upload instead of
|
||||
single-part. Must be at least 5 MB.
|
||||
@@ -259,9 +278,21 @@ that they did not create.
.TP
\fB\-o\fR nomultipart - disable multipart uploads
.TP
\fB\-o\fR streamupload (default is disable)
Enable stream upload.
If this option is enabled, a sequential upload is performed in parallel with the write, starting from the parts that have already been written during a multipart upload.
This is expected to give better performance than the other upload functions.
Note that this option is still experimental and may change in the future.
.TP
\fB\-o\fR max_thread_count (default is "5")
Specifies the number of threads waiting for stream uploads.
Note that this option and Stream Upload are still experimental and subject to change in the future.
This option will be merged with "parallel_count" in the future.
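The idea behind stream upload is that parts which are already fully written can be handed to a small pool of uploader threads while the application keeps writing later parts. The snippet below is only a conceptual sketch of that producer/consumer shape; it is not the s3fs implementation, the thread count merely mirrors the max_thread_count default of 5, and the "upload" is simulated with a printf:

// Illustration only: uploader threads consume finished parts while writing continues.
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

int main()
{
    std::queue<int>         ready_parts;            // parts whose bytes are fully written
    std::mutex              mtx;
    std::condition_variable cv;
    bool                    writing_done = false;
    const int               max_thread_count = 5;   // -o max_thread_count=5 (default)

    // Uploader threads: take finished parts and send them while writing continues.
    std::vector<std::thread> uploaders;
    for(int i = 0; i < max_thread_count; ++i){
        uploaders.emplace_back([&]{
            for(;;){
                int part;
                {
                    std::unique_lock<std::mutex> lock(mtx);
                    cv.wait(lock, [&]{ return !ready_parts.empty() || writing_done; });
                    if(ready_parts.empty()){
                        return;                     // nothing left and writer finished
                    }
                    part = ready_parts.front();
                    ready_parts.pop();
                }
                std::printf("uploading part %d\n", part);   // stand-in for the real upload
            }
        });
    }

    // Writer: as each part's data is written, mark it ready for upload.
    for(int part = 1; part <= 12; ++part){
        std::lock_guard<std::mutex> lock(mtx);
        ready_parts.push(part);
        cv.notify_one();
    }
    {
        std::lock_guard<std::mutex> lock(mtx);
        writing_done = true;
    }
    cv.notify_all();

    for(auto& t : uploaders){
        t.join();
    }
    return 0;
}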
.TP
\fB\-o\fR enable_content_md5 (default is disable)
Allow S3 server to check data integrity of uploads via the Content-MD5 header.
This can add CPU overhead to transfers.
.TP
\fB\-o\fR enable_unsigned_payload (default is disable)
Do not calculate Content-SHA256 for PutObject and UploadPart
payloads. This can reduce CPU overhead to transfers.
@@ -284,6 +315,15 @@ This option instructs s3fs to use IBM IAM authentication. In this mode, the AWSA
\fB\-o\fR ibm_iam_endpoint (default is https://iam.cloud.ibm.com)
Sets the URL to use for IBM IAM authentication.
.TP
\fB\-o\fR credlib (default=\"\" which means disabled)
Specifies the shared library that handles the credentials containing the authentication token.
If this option is specified, the credential and token processing provided by the shared library will be performed instead of the built-in credential processing.
This option cannot be specified with the passwd_file, profile, use_session_token, ecs, ibm_iam_auth, ibm_iam_endpoint, imdsv1only and iam_role options.
.TP
\fB\-o\fR credlib_opts (default=\"\" which means disabled)
Specifies the options to pass when the shared library specified in credlib is loaded and then initialized.
The string specified in this option is defined by the shared library.
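Conceptually, credlib points s3fs at a plug-in that is loaded at start-up and handed the credlib_opts string during initialization. The sketch below shows only that general dlopen/dlsym pattern; the library path, the entry-point name "init_credential_plugin" and its signature are placeholders for this illustration, not the actual interface that s3fs credential libraries must export:

// Illustration only: loading a credential plug-in and passing it an option string.
#include <cstdio>
#include <dlfcn.h>

int main()
{
    void* handle = dlopen("/usr/lib/mycred.so", RTLD_LAZY);   // path from -o credlib=...
    if(!handle){
        std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
    }

    typedef bool (*init_fn)(const char* opts);
    init_fn init = reinterpret_cast<init_fn>(dlsym(handle, "init_credential_plugin"));
    if(!init){
        std::fprintf(stderr, "dlsym failed: %s\n", dlerror());
        dlclose(handle);
        return 1;
    }

    // The string given with -o credlib_opts=... is interpreted by the library itself.
    if(!init("endpoint=https://example.invalid,profile=dev")){
        std::fprintf(stderr, "plug-in initialization failed\n");
    }

    dlclose(handle);
    return 0;
}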
.TP
\fB\-o\fR use_xattr (default is not handling the extended attribute)
Enable handling of extended attributes (xattrs).
If you set this option, you can use extended attributes.
@@ -332,15 +372,15 @@ This name will be added to logging messages and user agent headers sent by s3fs.
s3fs complements the lack of file/directory mode information if a file or a directory object does not have an x-amz-meta-mode header.
By default, s3fs does not complement stat information for an object, so the object cannot be listed or modified.
.TP
\fB\-o\fR notsup_compat_dir (disable support of alternative directory names)
\fB\-o\fR compat_dir (enable support of alternative directory names)
.RS
s3fs supports the three different naming schemas "dir/", "dir" and "dir_$folder$" to map directory names to S3 objects and vice versa. As a fourth variant, directories can be determined indirectly if there is a file object with a path (e.g. "/dir/file") but without the parent directory.
s3fs supports two different naming schemas "dir/" and "dir" to map directory names to S3 objects and vice versa by default. As a third variant, directories can be determined indirectly if there is a file object with a path (e.g. "/dir/file") but without the parent directory. This option enables a fourth variant, "dir_$folder$", created by older applications.
.TP
S3fs uses only the first schema "dir/" to create S3 objects for directories.
.TP
The support for these different naming schemas causes an increased communication effort.
.TP
If all applications exclusively use the "dir/" naming scheme and the bucket does not contain any objects with a different naming scheme, this option can be used to disable support for alternative naming schemes. This reduces access time and can save costs.
If you do not have access permissions to the bucket and specify a directory path created by a client other than s3fs for the mount point, you cannot start because the mount point directory cannot be found by s3fs. But by specifying this option, you can avoid this error.
.RE
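For a directory named "dir", the naming schemas described above correspond to different S3 object keys. The tiny program below simply prints those candidate keys; it is an illustration of the text, not code taken from s3fs:

// Illustration only: the object keys that may represent the directory "dir".
#include <cstdio>
#include <string>
#include <vector>

int main()
{
    const std::string dir = "dir";
    const std::vector<std::string> candidates = {
        dir + "/",            // the schema s3fs itself creates
        dir,                  // bare key, used by some clients
        dir + "_$folder$",    // legacy schema enabled with -o compat_dir
    };
    for(const auto& key : candidates){
        std::printf("candidate object key: %s\n", key.c_str());
    }
    std::printf("or: no directory object at all, only e.g. \"%s/file\"\n", dir.c_str());
    return 0;
}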
.TP
\fB\-o\fR use_wtf8 - support arbitrary file system encoding.
@@ -364,6 +404,20 @@ Specify the path of the mime.types file.
If this option is not specified, the existence of "/etc/mime.types" is checked, and that file is loaded as mime information.
If this file does not exist on macOS, then "/etc/apache2/mime.types" is checked as well.
.TP
\fB\-o\fR proxy (default="")
This option specifies a proxy to the S3 server.
Specify the proxy in the format '[<scheme>://]hostname(fqdn)[:<port>]'.
'<scheme>://' can be omitted, and 'http://' is used when omitted.
':<port>' can also be omitted. If omitted, port 443 is used for the HTTPS scheme, and port 1080 is used otherwise.
This option is the same as the curl command's '--proxy(-x)' option and libcurl's 'CURLOPT_PROXY' flag.
This option is equivalent to and takes precedence over the environment variables 'http_proxy', 'all_proxy', etc.
.TP
\fB\-o\fR proxy_cred_file (default="")
This option specifies the file that describes the username and passphrase for authentication of the proxy when an HTTP proxy is specified by the 'proxy' option.
Username and passphrase are valid only for the HTTP scheme.
If the HTTP proxy does not require authentication, this option is not required.
Separate the username and passphrase with a ':' character and specify each as a URL-encoded string.
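As a concrete illustration of the expected file contents, the snippet below URL-encodes a username and passphrase and writes them as a single 'user:pass' line. The file path and the credentials are examples only, and the encoding helper is a sketch rather than code from s3fs:

// Illustration only: produce a proxy_cred_file with URL-encoded "username:passphrase".
#include <cctype>
#include <cstdio>
#include <fstream>
#include <string>

static std::string url_encode(const std::string& in)
{
    static const char hex[] = "0123456789ABCDEF";
    std::string out;
    for(unsigned char c : in){
        if(isalnum(c) || '-' == c || '_' == c || '.' == c || '~' == c){
            out += static_cast<char>(c);
        }else{
            out += '%';
            out += hex[c >> 4];
            out += hex[c & 0x0F];
        }
    }
    return out;
}

int main()
{
    const std::string user = "proxy user";      // example credentials
    const std::string pass = "p@ss:word";

    std::ofstream file("/etc/s3fs-proxy.cred"); // example path passed to -o proxy_cred_file
    file << url_encode(user) << ":" << url_encode(pass) << "\n";
    return 0;
}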
.TP
\fB\-o\fR logfile - specify the log output file.
s3fs outputs its log to syslog. Alternatively, if s3fs is started with the "-f" option specified, the log will be output to stdout/stderr.
You can use this option to specify the log file that s3fs outputs.
@@ -390,6 +444,10 @@ If the cache is enabled, you can check the integrity of the cache file and the c
If this option is specified, sending the SIGUSR1 signal to the s3fs process checks the cache status at that time.
This option can take a file path as a parameter to output the check result to that file.
The file path parameter can be omitted. If omitted, the result will be output to stdout or syslog.
.TP
\fB\-o\fR update_parent_dir_stat (default is disable)
The parent directory's mtime and ctime are updated when a file or directory is created or deleted (when the parent directory's inode is updated).
By default, parent directory statistics are not updated.
.SS "utility mode options"
.TP
\fB\-u\fR or \fB\-\-incomplete\-mpu\-list\fR
@@ -404,9 +462,15 @@ It can be specified as year, month, day, hour, minute, second, and it is express
For example, "1Y6M10D12h30m30s".
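The expiration argument is a compact duration string. The sketch below parses that documented format into seconds, using rough 365-day years and 30-day months purely for illustration (s3fs's own conversion may differ), to show how "1Y6M10D12h30m30s" breaks down:

// Illustration only: parse a duration such as "1Y6M10D12h30m30s" into seconds.
// Y/M/D/h/m/s are the units documented above; year and month lengths are approximations.
#include <cstdint>
#include <cstdio>
#include <string>

static bool parse_duration(const std::string& arg, uint64_t& seconds)
{
    seconds = 0;
    uint64_t number      = 0;
    bool     have_digits = false;

    for(char c : arg){
        if('0' <= c && c <= '9'){
            number = number * 10 + static_cast<uint64_t>(c - '0');
            have_digits = true;
            continue;
        }
        if(!have_digits){
            return false;                         // unit without a leading number
        }
        uint64_t unit;
        switch(c){
            case 'Y': unit = 365ULL * 24 * 60 * 60; break;  // approximate year
            case 'M': unit = 30ULL * 24 * 60 * 60;  break;  // approximate month
            case 'D': unit = 24ULL * 60 * 60;       break;
            case 'h': unit = 60ULL * 60;            break;
            case 'm': unit = 60ULL;                 break;
            case 's': unit = 1ULL;                  break;
            default:  return false;                 // unknown unit character
        }
        seconds += number * unit;
        number = 0;
        have_digits = false;
    }
    return !have_digits;                            // trailing digits without a unit
}

int main()
{
    uint64_t seconds = 0;
    if(parse_duration("1Y6M10D12h30m30s", seconds)){
        std::printf("1Y6M10D12h30m30s is about %llu seconds\n", (unsigned long long)seconds);
    }
    return 0;
}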
.SH FUSE/MOUNT OPTIONS
.TP
Most of the generic mount options described in 'man mount' are supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime, noatime, sync async, dirsync). Filesystems are mounted with '\-onodev,nosuid' by default, which can only be overridden by a privileged user.
Most of the generic mount options described in 'man mount' are supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime, noatime, sync, async, dirsync). Filesystems are mounted with '\-onodev,nosuid' by default, which can only be overridden by a privileged user.
.TP
There are many FUSE specific mount options that can be specified. e.g. allow_other. See the FUSE README for the full set.
.SH SERVER URL/REQUEST STYLE
Be careful when specifying the server endpoint(URL).
.TP
If your bucket name contains dots("."), you should use the path request style(using "use_path_request_style" option).
.TP
Also, if you are using a server other than Amazon S3, you need to specify the endpoint with the "url" option. At that time, depending on the server you are using, you may have to specify the path request style("use_path_request_style" option).
.SH LOCAL STORAGE CONSUMPTION
.TP
s3fs requires local caching for operation. You can enable a local cache with "\-o use_cache" or s3fs uses temporary files to cache pending requests to s3.
@@ -451,14 +515,16 @@ Enable no object cache ("\-o enable_noobj_cache")
If a bucket is used exclusively by an s3fs instance, you can enable the cache for non-existent files and directories with "\-o enable_noobj_cache". This eliminates repeated requests to check the existence of an object, saving time and possibly money.
.RE
.IP \[bu]
Disable support of alternative directory names ("\-o notsup_compat_dir")
Enable support of alternative directory names ("\-o compat_dir")
.RS
.TP
s3fs supports "dir/", "dir" and "dir_$folder$" to map directory names to S3 objects and vice versa.
s3fs recognizes "dir/" objects as directories. Clients other than s3fs may use "dir", "dir_$folder$" objects as directories, or directory objects may not exist. In order for s3fs to recognize these as directories, you can specify the "compat_dir" option.
.RE
.IP \[bu]
Completion of file and directory information ("\-o complement_stat")
.RS
.TP
Some applications use a different naming schema for associating directory names to S3 objects. For example, Apache Hadoop uses the "dir_$folder$" schema to create S3 objects for directories.
.TP
The option "\-o notsup_compat_dir" can be set if all accessing tools use the "dir/" naming schema for directory objects and the bucket does not contain any objects with a different naming scheme. In this case, accessing directory objects saves time and possibly money because alternative schemas are not checked.
s3fs uses the "x-amz-meta-mode header" to determine if an object is a file or a directory. For this reason, objects that do not have the "x-amz-meta-mode header" may not produce the expected results(The directory cannot be displayed, etc.). By specifying the "complement_stat" option, s3fs can automatically complete this missing attribute information, and you can get the expected results.
.RE
.SH NOTES
.TP

@@ -23,6 +23,9 @@ AM_CPPFLAGS = $(DEPS_CFLAGS)
if USE_GNUTLS_NETTLE
AM_CPPFLAGS += -DUSE_GNUTLS_NETTLE
endif
if USE_SSL_OPENSSL_30
AM_CPPFLAGS += -DUSE_OPENSSL_30
endif

s3fs_SOURCES = \
s3fs.cpp \
@@ -32,12 +35,10 @@ s3fs_SOURCES = \
s3fs_xml.cpp \
metaheader.cpp \
mpu_util.cpp \
mvnode.cpp \
curl.cpp \
curl_handlerpool.cpp \
curl_multi.cpp \
curl_util.cpp \
bodydata.cpp \
s3objlist.cpp \
cache.cpp \
string_util.cpp \
@@ -54,6 +55,7 @@ s3fs_SOURCES = \
addhead.cpp \
sighandlers.cpp \
autolock.cpp \
threadpoolman.cpp \
common_auth.cpp
if USE_SSL_OPENSSL
s3fs_SOURCES += openssl_auth.cpp
@@ -107,6 +109,6 @@ clang-tidy:
# tab-width: 4
# c-basic-offset: 4
# End:
# vim600: expandtab sw=4 ts= fdm=marker
# vim<600: expandtab sw=4 ts=4
# vim600: noexpandtab sw=4 ts=4 fdm=marker
# vim<600: noexpandtab sw=4 ts=4
#

@ -20,18 +20,21 @@
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <sstream>
|
||||
#include <fstream>
|
||||
#include <strings.h>
|
||||
#include <vector>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "addhead.h"
|
||||
#include "curl_util.h"
|
||||
#include "s3fs_logger.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Symbols
|
||||
//-------------------------------------------------------------------
|
||||
#define ADD_HEAD_REGEX "reg:"
|
||||
static constexpr char ADD_HEAD_REGEX[] = "reg:";
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class AdditionalHeader
|
||||
@ -62,7 +65,7 @@ AdditionalHeader::~AdditionalHeader()
|
||||
bool AdditionalHeader::Load(const char* file)
|
||||
{
|
||||
if(!file){
|
||||
S3FS_PRN_WARN("file is NULL.");
|
||||
S3FS_PRN_WARN("file is nullptr.");
|
||||
return false;
|
||||
}
|
||||
Unload();
|
||||
@ -75,7 +78,6 @@ bool AdditionalHeader::Load(const char* file)
|
||||
|
||||
// read file
|
||||
std::string line;
|
||||
ADDHEAD *paddhead;
|
||||
while(getline(AH, line)){
|
||||
if(line.empty()){
|
||||
continue;
|
||||
@ -108,49 +110,32 @@ bool AdditionalHeader::Load(const char* file)
|
||||
return false;
|
||||
}
|
||||
|
||||
paddhead = new ADDHEAD;
|
||||
if(0 == strncasecmp(key.c_str(), ADD_HEAD_REGEX, strlen(ADD_HEAD_REGEX))){
|
||||
// regex
|
||||
if(key.size() <= strlen(ADD_HEAD_REGEX)){
|
||||
S3FS_PRN_ERR("file format error: %s key(suffix) does not have key std::string.", key.c_str());
|
||||
delete paddhead;
|
||||
continue;
|
||||
}
|
||||
key.erase(0, strlen(ADD_HEAD_REGEX));
|
||||
|
||||
// compile
|
||||
regex_t* preg = new regex_t;
|
||||
int result;
|
||||
if(0 != (result = regcomp(preg, key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info
|
||||
char errbuf[256];
|
||||
regerror(result, preg, errbuf, sizeof(errbuf));
|
||||
S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf);
|
||||
delete preg;
|
||||
delete paddhead;
|
||||
continue;
|
||||
}
|
||||
|
||||
// set
|
||||
paddhead->pregex = preg;
|
||||
paddhead->basestring = key;
|
||||
paddhead->headkey = head;
|
||||
paddhead->headvalue = value;
|
||||
// compile
|
||||
std::unique_ptr<regex_t> preg(new regex_t);
|
||||
int result;
|
||||
if(0 != (result = regcomp(preg.get(), key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info
|
||||
char errbuf[256];
|
||||
regerror(result, preg.get(), errbuf, sizeof(errbuf));
|
||||
S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf);
|
||||
continue;
|
||||
}
|
||||
|
||||
addheadlist.emplace_back(std::move(preg), key, head, value);
|
||||
}else{
|
||||
// not regex, directly comparing
|
||||
paddhead->pregex = NULL;
|
||||
paddhead->basestring = key;
|
||||
paddhead->headkey = head;
|
||||
paddhead->headvalue = value;
|
||||
addheadlist.emplace_back(nullptr, key, head, value);
|
||||
}
|
||||
|
||||
// add list
|
||||
addheadlist.push_back(paddhead);
|
||||
|
||||
// set flag
|
||||
if(!is_enable){
|
||||
is_enable = true;
|
||||
}
|
||||
is_enable = true;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
@ -159,16 +144,6 @@ void AdditionalHeader::Unload()
|
||||
{
|
||||
is_enable = false;
|
||||
|
||||
for(addheadlist_t::iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
|
||||
ADDHEAD *paddhead = *iter;
|
||||
if(paddhead){
|
||||
if(paddhead->pregex){
|
||||
regfree(paddhead->pregex);
|
||||
delete paddhead->pregex;
|
||||
}
|
||||
delete paddhead;
|
||||
}
|
||||
}
|
||||
addheadlist.clear();
|
||||
}
|
||||
|
||||
@ -178,7 +153,7 @@ bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const
|
||||
return true;
|
||||
}
|
||||
if(!path){
|
||||
S3FS_PRN_WARN("path is NULL.");
|
||||
S3FS_PRN_WARN("path is nullptr.");
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -190,22 +165,19 @@ bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const
|
||||
// Because to allow duplicate key, and then scanning the entire table.
|
||||
//
|
||||
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
|
||||
const ADDHEAD *paddhead = *iter;
|
||||
if(!paddhead){
|
||||
continue;
|
||||
}
|
||||
const add_header *paddhead = &*iter;
|
||||
|
||||
if(paddhead->pregex){
|
||||
// regex
|
||||
regmatch_t match; // not use
|
||||
if(0 == regexec(paddhead->pregex, path, 1, &match, 0)){
|
||||
if(0 == regexec(paddhead->pregex.get(), path, 1, &match, 0)){
|
||||
// match -> adding header
|
||||
meta[paddhead->headkey] = paddhead->headvalue;
|
||||
}
|
||||
}else{
|
||||
// directly comparing
|
||||
if(paddhead->basestring.length() < pathlength){
|
||||
if(paddhead->basestring.empty() || 0 == strcmp(&path[pathlength - paddhead->basestring.length()], paddhead->basestring.c_str())){
|
||||
if(paddhead->basestring.empty() || paddhead->basestring == &path[pathlength - paddhead->basestring.length()]){
|
||||
// match -> adding header
|
||||
meta[paddhead->headkey] = paddhead->headvalue;
|
||||
}
|
||||
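For readers unfamiliar with the additional-header rules handled above: an entry either carries a compiled POSIX regex (keys prefixed with "reg:") or a plain suffix compared against the end of the path. The standalone sketch below reproduces that matching idea with the same POSIX regex API; it is a simplified illustration, not the s3fs code itself, and the ".jpg" pattern is just an example rule:

// Illustration only: match a path either by compiled regex or by suffix,
// mirroring the two kinds of entries handled in AdditionalHeader::AddHeader().
#include <cstdio>
#include <regex.h>
#include <string>

int main()
{
    const std::string path = "/bucketdir/photo.jpg";

    // 1) regex rule (a key beginning with "reg:" carries a regular expression)
    regex_t preg;
    if(0 == regcomp(&preg, "\\.jpe?g$", REG_EXTENDED | REG_NOSUB)){
        regmatch_t match;                       // unused because of REG_NOSUB
        if(0 == regexec(&preg, path.c_str(), 1, &match, 0)){
            std::printf("regex rule matched, header would be added\n");
        }
        regfree(&preg);
    }

    // 2) plain suffix rule, compared directly against the end of the path
    const std::string suffix = ".jpg";
    if(suffix.length() < path.length() &&
       suffix == &path[path.length() - suffix.length()]){
        std::printf("suffix rule matched, header would be added\n");
    }
    return 0;
}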
@ -243,19 +215,17 @@ bool AdditionalHeader::Dump() const
|
||||
ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << std::endl;
|
||||
|
||||
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter, ++cnt){
|
||||
const ADDHEAD *paddhead = *iter;
|
||||
const add_header *paddhead = &*iter;
|
||||
|
||||
ssdbg << " [" << cnt << "] = {" << std::endl;
|
||||
ssdbg << " [" << cnt << "] = {" << std::endl;
|
||||
|
||||
if(paddhead){
|
||||
if(paddhead->pregex){
|
||||
ssdbg << " type\t\t--->\tregex" << std::endl;
|
||||
}else{
|
||||
ssdbg << " type\t\t--->\tsuffix matching" << std::endl;
|
||||
}
|
||||
ssdbg << " base std::string\t--->\t" << paddhead->basestring << std::endl;
|
||||
ssdbg << " add header\t--->\t" << paddhead->headkey << ": " << paddhead->headvalue << std::endl;
|
||||
if(paddhead->pregex){
|
||||
ssdbg << " type\t\t--->\tregex" << std::endl;
|
||||
}else{
|
||||
ssdbg << " type\t\t--->\tsuffix matching" << std::endl;
|
||||
}
|
||||
ssdbg << " base std::string\t--->\t" << paddhead->basestring << std::endl;
|
||||
ssdbg << " add header\t--->\t" << paddhead->headkey << ": " << paddhead->headvalue << std::endl;
|
||||
ssdbg << " }" << std::endl;
|
||||
}
|
||||
|
||||
|
||||
@ -21,21 +21,40 @@
|
||||
#ifndef S3FS_ADDHEAD_H_
|
||||
#define S3FS_ADDHEAD_H_
|
||||
|
||||
#include <memory>
|
||||
#include <regex.h>
|
||||
#include <vector>
|
||||
|
||||
#include "metaheader.h"
|
||||
|
||||
//----------------------------------------------
|
||||
// Structure / Typedef
|
||||
//----------------------------------------------
|
||||
typedef struct add_header{
|
||||
regex_t* pregex; // not NULL means using regex, NULL means comparing suffix directly.
|
||||
struct add_header{
|
||||
add_header(std::unique_ptr<regex_t> pregex, std::string basestring, std::string headkey, std::string headvalue)
|
||||
: pregex(std::move(pregex))
|
||||
, basestring(std::move(basestring))
|
||||
, headkey(std::move(headkey))
|
||||
, headvalue(std::move(headvalue))
|
||||
{}
|
||||
~add_header() {
|
||||
if(pregex){
|
||||
regfree(pregex.get());
|
||||
}
|
||||
}
|
||||
|
||||
add_header(const add_header&) = delete;
|
||||
add_header(add_header&& val) = default;
|
||||
add_header& operator=(const add_header&) = delete;
|
||||
add_header& operator=(add_header&&) = delete;
|
||||
|
||||
std::unique_ptr<regex_t> pregex; // not nullptr means using regex, nullptr means comparing suffix directly.
|
||||
std::string basestring;
|
||||
std::string headkey;
|
||||
std::string headvalue;
|
||||
}ADDHEAD;
|
||||
};
|
||||
|
||||
typedef std::vector<ADDHEAD *> addheadlist_t;
|
||||
typedef std::vector<add_header> addheadlist_t;
|
||||
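A note on the ownership model introduced here, and a hedged alternative: because add_header now owns the compiled regex through a std::unique_ptr and frees it in its destructor, the copy operations are deleted (copying would lead to a double regfree) while move construction stays defaulted so the values can live in a std::vector. The same effect can be had with a custom deleter, as in this small sketch, which is an alternative shape rather than the approach taken in the patch:

// Illustration only: let unique_ptr release the regex, so no user-written
// destructor is needed and the struct stays movable but non-copyable.
#include <memory>
#include <regex.h>
#include <string>
#include <utility>
#include <vector>

struct regex_deleter{
    void operator()(regex_t* preg) const
    {
        if(preg){
            regfree(preg);   // release the compiled pattern buffers...
            delete preg;     // ...then the regex_t object itself
        }
    }
};

struct add_header_alt{
    std::unique_ptr<regex_t, regex_deleter> pregex;  // null means suffix matching
    std::string basestring;
    std::string headkey;
    std::string headvalue;
};

int main()
{
    std::vector<add_header_alt> list;
    add_header_alt rule;
    rule.basestring = ".jpg";
    rule.headkey    = "Content-Type";
    rule.headvalue  = "image/jpeg";
    list.push_back(std::move(rule));   // moves, no copy is ever made
    return 0;
}

Either way, std::vector<add_header> can grow and be cleared without the manual regfree/delete loop that Unload() used to perform.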
|
||||
//----------------------------------------------
|
||||
// Class AdditionalHeader
|
||||
@ -50,6 +69,10 @@ class AdditionalHeader
|
||||
protected:
|
||||
AdditionalHeader();
|
||||
~AdditionalHeader();
|
||||
AdditionalHeader(const AdditionalHeader&) = delete;
|
||||
AdditionalHeader(AdditionalHeader&&) = delete;
|
||||
AdditionalHeader& operator=(const AdditionalHeader&) = delete;
|
||||
AdditionalHeader& operator=(AdditionalHeader&&) = delete;
|
||||
|
||||
public:
|
||||
// Reference singleton
|
||||
|
||||
@ -18,13 +18,11 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cerrno>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "autolock.h"
|
||||
#include "s3fs_logger.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class AutoLock
|
||||
|
||||
@ -40,7 +40,10 @@ class AutoLock
|
||||
bool is_lock_acquired;
|
||||
|
||||
private:
|
||||
AutoLock(const AutoLock&);
|
||||
AutoLock(const AutoLock&) = delete;
|
||||
AutoLock(AutoLock&&) = delete;
|
||||
AutoLock& operator=(const AutoLock&) = delete;
|
||||
AutoLock& operator=(AutoLock&&) = delete;
|
||||
|
||||
public:
|
||||
explicit AutoLock(pthread_mutex_t* pmutex, Type type = NONE);
|
||||
|
||||
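The AutoLock change above (deleted copy and move operations) goes together with the AutoLock::Type argument used throughout the stat cache rework later in this diff: callers that already hold the mutex pass ALREADY_LOCKED so the guard neither relocks nor unlocks. Below is a minimal sketch of that pattern, not the actual s3fs class:

// Illustration only: an RAII guard that can be told the mutex is already held.
#include <pthread.h>

class ScopedLock
{
    public:
        enum Type{ NONE, ALREADY_LOCKED };

        explicit ScopedLock(pthread_mutex_t* pmutex, Type type = NONE) :
            pmutex(pmutex), owns(NONE == type)
        {
            if(owns){
                pthread_mutex_lock(pmutex);
            }
        }
        ~ScopedLock()
        {
            if(owns){
                pthread_mutex_unlock(pmutex);
            }
        }

        ScopedLock(const ScopedLock&) = delete;
        ScopedLock(ScopedLock&&) = delete;
        ScopedLock& operator=(const ScopedLock&) = delete;
        ScopedLock& operator=(ScopedLock&&) = delete;

    private:
        pthread_mutex_t* pmutex;
        bool             owns;
};

With this shape, a method such as DelStat(key, ALREADY_LOCKED) can be called from inside AddStat() without deadlocking on a non-recursive mutex.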
122
src/bodydata.cpp
122
src/bodydata.cpp
@ -1,122 +0,0 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "bodydata.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Variables
|
||||
//-------------------------------------------------------------------
|
||||
static const int BODYDATA_RESIZE_APPEND_MIN = 1024;
|
||||
static const int BODYDATA_RESIZE_APPEND_MID = 1024 * 1024;
|
||||
static const int BODYDATA_RESIZE_APPEND_MAX = 10 * 1024 * 1024;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Functions
|
||||
//-------------------------------------------------------------------
|
||||
static size_t adjust_block(size_t bytes, size_t block)
|
||||
{
|
||||
return ((bytes / block) + ((bytes % block) ? 1 : 0)) * block;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class BodyData
|
||||
//-------------------------------------------------------------------
|
||||
bool BodyData::Resize(size_t addbytes)
|
||||
{
|
||||
if(IsSafeSize(addbytes)){
|
||||
return true;
|
||||
}
|
||||
|
||||
// New size
|
||||
size_t need_size = adjust_block((lastpos + addbytes + 1) - bufsize, sizeof(off_t));
|
||||
|
||||
if(BODYDATA_RESIZE_APPEND_MAX < bufsize){
|
||||
need_size = (BODYDATA_RESIZE_APPEND_MAX < need_size ? need_size : BODYDATA_RESIZE_APPEND_MAX);
|
||||
}else if(BODYDATA_RESIZE_APPEND_MID < bufsize){
|
||||
need_size = (BODYDATA_RESIZE_APPEND_MID < need_size ? need_size : BODYDATA_RESIZE_APPEND_MID);
|
||||
}else if(BODYDATA_RESIZE_APPEND_MIN < bufsize){
|
||||
need_size = ((bufsize * 2) < need_size ? need_size : (bufsize * 2));
|
||||
}else{
|
||||
need_size = (BODYDATA_RESIZE_APPEND_MIN < need_size ? need_size : BODYDATA_RESIZE_APPEND_MIN);
|
||||
}
|
||||
// realloc
|
||||
char* newtext;
|
||||
if(NULL == (newtext = (char*)realloc(text, (bufsize + need_size)))){
|
||||
S3FS_PRN_CRIT("not enough memory (realloc returned NULL)");
|
||||
free(text);
|
||||
text = NULL;
|
||||
return false;
|
||||
}
|
||||
text = newtext;
|
||||
bufsize += need_size;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void BodyData::Clear()
|
||||
{
|
||||
if(text){
|
||||
free(text);
|
||||
text = NULL;
|
||||
}
|
||||
lastpos = 0;
|
||||
bufsize = 0;
|
||||
}
|
||||
|
||||
bool BodyData::Append(void* ptr, size_t bytes)
|
||||
{
|
||||
if(!ptr){
|
||||
return false;
|
||||
}
|
||||
if(0 == bytes){
|
||||
return true;
|
||||
}
|
||||
if(!Resize(bytes)){
|
||||
return false;
|
||||
}
|
||||
memcpy(&text[lastpos], ptr, bytes);
|
||||
lastpos += bytes;
|
||||
text[lastpos] = '\0';
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
const char* BodyData::str() const
|
||||
{
|
||||
if(!text){
|
||||
static const char strnull[] = "";
|
||||
return strnull;
|
||||
}
|
||||
return text;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
@ -1,72 +0,0 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_BODYDATA_H_
|
||||
#define S3FS_BODYDATA_H_
|
||||
|
||||
//----------------------------------------------
|
||||
// Class BodyData
|
||||
//----------------------------------------------
|
||||
// memory class for curl write memory callback
|
||||
//
|
||||
class BodyData
|
||||
{
|
||||
private:
|
||||
char* text;
|
||||
size_t lastpos;
|
||||
size_t bufsize;
|
||||
|
||||
private:
|
||||
bool IsSafeSize(size_t addbytes) const
|
||||
{
|
||||
return ((lastpos + addbytes + 1) > bufsize ? false : true);
|
||||
}
|
||||
bool Resize(size_t addbytes);
|
||||
|
||||
public:
|
||||
BodyData() : text(NULL), lastpos(0), bufsize(0) {}
|
||||
~BodyData()
|
||||
{
|
||||
Clear();
|
||||
}
|
||||
|
||||
void Clear();
|
||||
bool Append(void* ptr, size_t bytes);
|
||||
bool Append(void* ptr, size_t blockSize, size_t numBlocks)
|
||||
{
|
||||
return Append(ptr, (blockSize * numBlocks));
|
||||
}
|
||||
const char* str() const;
|
||||
size_t size() const
|
||||
{
|
||||
return lastpos;
|
||||
}
|
||||
};
|
||||
|
||||
#endif // S3FS_BODYDATA_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
432
src/cache.cpp
432
src/cache.cpp
@ -18,14 +18,13 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cerrno>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include <algorithm>
|
||||
#include <cerrno>
|
||||
#include <cstdlib>
|
||||
#include <vector>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_logger.h"
|
||||
#include "s3fs_util.h"
|
||||
#include "cache.h"
|
||||
#include "autolock.h"
|
||||
@ -85,9 +84,9 @@ struct sort_statiterlist{
|
||||
// ascending order
|
||||
bool operator()(const stat_cache_t::iterator& src1, const stat_cache_t::iterator& src2) const
|
||||
{
|
||||
int result = CompareStatCacheTime(src1->second->cache_date, src2->second->cache_date);
|
||||
int result = CompareStatCacheTime(src1->second.cache_date, src2->second.cache_date);
|
||||
if(0 == result){
|
||||
if(src1->second->hit_count < src2->second->hit_count){
|
||||
if(src1->second.hit_count < src2->second.hit_count){
|
||||
result = -1;
|
||||
}
|
||||
}
|
||||
@ -104,9 +103,9 @@ struct sort_symlinkiterlist{
|
||||
// ascending order
|
||||
bool operator()(const symlink_cache_t::iterator& src1, const symlink_cache_t::iterator& src2) const
|
||||
{
|
||||
int result = CompareStatCacheTime(src1->second->cache_date, src2->second->cache_date); // use the same as Stats
|
||||
int result = CompareStatCacheTime(src1->second.cache_date, src2->second.cache_date); // use the same as Stats
|
||||
if(0 == result){
|
||||
if(src1->second->hit_count < src2->second->hit_count){
|
||||
if(src1->second.hit_count < src2->second.hit_count){
|
||||
result = -1;
|
||||
}
|
||||
}
|
||||
@ -123,7 +122,7 @@ pthread_mutex_t StatCache::stat_cache_lock;
|
||||
//-------------------------------------------------------------------
|
||||
// Constructor/Destructor
|
||||
//-------------------------------------------------------------------
|
||||
StatCache::StatCache() : IsExpireTime(true), IsExpireIntervalType(false), ExpireTime(15 * 60), CacheSize(100000), IsCacheNoObject(false)
|
||||
StatCache::StatCache() : IsExpireTime(true), IsExpireIntervalType(false), ExpireTime(15 * 60), CacheSize(100000), IsCacheNoObject(true)
|
||||
{
|
||||
if(this == StatCache::getStatCacheData()){
|
||||
stat_cache.clear();
|
||||
@ -205,9 +204,6 @@ void StatCache::Clear()
|
||||
{
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ++iter){
|
||||
delete (*iter).second;
|
||||
}
|
||||
stat_cache.clear();
|
||||
S3FS_MALLOCTRIM(0);
|
||||
}
|
||||
@ -229,13 +225,13 @@ bool StatCache::GetStat(const std::string& key, struct stat* pst, headers_t* met
|
||||
iter = stat_cache.find(strpath);
|
||||
}
|
||||
|
||||
if(iter != stat_cache.end() && (*iter).second){
|
||||
stat_cache_entry* ent = (*iter).second;
|
||||
if(iter != stat_cache.end()){
|
||||
stat_cache_entry* ent = &iter->second;
|
||||
if(0 < ent->notruncate || !IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){
|
||||
if(ent->noobjcache){
|
||||
if(!IsCacheNoObject){
|
||||
// need to delete this cache.
|
||||
DelStat(strpath, /*lock_already_held=*/ true);
|
||||
DelStat(strpath, AutoLock::ALREADY_LOCKED);
|
||||
}else{
|
||||
// noobjcache = true means no object.
|
||||
}
|
||||
@ -249,7 +245,7 @@ bool StatCache::GetStat(const std::string& key, struct stat* pst, headers_t* met
|
||||
std::string tag = lower(hiter->first);
|
||||
if(tag == "etag"){
|
||||
stretag = hiter->second;
|
||||
if('\0' != petag[0] && 0 != strcmp(petag, stretag.c_str())){
|
||||
if('\0' != petag[0] && petag != stretag){
|
||||
is_delete_cache = true;
|
||||
}
|
||||
break;
|
||||
@ -265,13 +261,13 @@ bool StatCache::GetStat(const std::string& key, struct stat* pst, headers_t* met
|
||||
S3FS_PRN_DBG("stat cache hit [path=%s][time=%lld.%09ld][hit count=%lu]",
|
||||
strpath.c_str(), static_cast<long long>(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count);
|
||||
|
||||
if(pst!= NULL){
|
||||
if(pst!= nullptr){
|
||||
*pst= ent->stbuf;
|
||||
}
|
||||
if(meta != NULL){
|
||||
if(meta != nullptr){
|
||||
*meta = ent->meta;
|
||||
}
|
||||
if(pisforce != NULL){
|
||||
if(pisforce != nullptr){
|
||||
(*pisforce) = ent->isforce;
|
||||
}
|
||||
ent->hit_count++;
|
||||
@ -289,7 +285,7 @@ bool StatCache::GetStat(const std::string& key, struct stat* pst, headers_t* met
|
||||
}
|
||||
|
||||
if(is_delete_cache){
|
||||
DelStat(strpath, /*lock_already_held=*/ true);
|
||||
DelStat(strpath, AutoLock::ALREADY_LOCKED);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
@ -315,12 +311,12 @@ bool StatCache::IsNoObjectCache(const std::string& key, bool overcheck)
|
||||
iter = stat_cache.find(strpath);
|
||||
}
|
||||
|
||||
if(iter != stat_cache.end() && (*iter).second) {
|
||||
stat_cache_entry* ent = (*iter).second;
|
||||
if(0 < ent->notruncate || !IsExpireTime || !IsExpireStatCacheTime((*iter).second->cache_date, ExpireTime)){
|
||||
if((*iter).second->noobjcache){
|
||||
if(iter != stat_cache.end()) {
|
||||
const stat_cache_entry* ent = &iter->second;
|
||||
if(0 < ent->notruncate || !IsExpireTime || !IsExpireStatCacheTime(iter->second.cache_date, ExpireTime)){
|
||||
if(iter->second.noobjcache){
|
||||
// noobjcache = true means no object.
|
||||
SetStatCacheTime((*iter).second->cache_date);
|
||||
SetStatCacheTime((*iter).second.cache_date);
|
||||
return true;
|
||||
}
|
||||
}else{
|
||||
@ -330,81 +326,78 @@ bool StatCache::IsNoObjectCache(const std::string& key, bool overcheck)
|
||||
}
|
||||
|
||||
if(is_delete_cache){
|
||||
DelStat(strpath, /*lock_already_held=*/ true);
|
||||
DelStat(strpath, AutoLock::ALREADY_LOCKED);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool StatCache::AddStat(const std::string& key, headers_t& meta, bool forcedir, bool no_truncate)
|
||||
bool StatCache::AddStat(const std::string& key, const headers_t& meta, bool forcedir, bool no_truncate)
|
||||
{
|
||||
if(!no_truncate && CacheSize< 1){
|
||||
return true;
|
||||
}
|
||||
S3FS_PRN_INFO3("add stat cache entry[path=%s]", key.c_str());
|
||||
|
||||
bool found;
|
||||
bool do_truncate;
|
||||
{
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
found = stat_cache.end() != stat_cache.find(key);
|
||||
do_truncate = stat_cache.size() > CacheSize;
|
||||
}
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
if(found){
|
||||
DelStat(key.c_str());
|
||||
if(stat_cache.end() != stat_cache.find(key)){
|
||||
// found cache
|
||||
DelStat(key.c_str(), AutoLock::ALREADY_LOCKED);
|
||||
}else{
|
||||
if(do_truncate){
|
||||
if(!TruncateCache()){
|
||||
// check: need to truncate cache
|
||||
if(stat_cache.size() > CacheSize){
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(!TruncateCache(AutoLock::ALREADY_LOCKED)){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// make new
|
||||
stat_cache_entry* ent = new stat_cache_entry();
|
||||
if(!convert_header_to_stat(key.c_str(), meta, &(ent->stbuf), forcedir)){
|
||||
delete ent;
|
||||
stat_cache_entry ent;
|
||||
if(!convert_header_to_stat(key.c_str(), meta, &ent.stbuf, forcedir)){
|
||||
return false;
|
||||
}
|
||||
ent->hit_count = 0;
|
||||
ent->isforce = forcedir;
|
||||
ent->noobjcache = false;
|
||||
ent->notruncate = (no_truncate ? 1L : 0L);
|
||||
ent->meta.clear();
|
||||
SetStatCacheTime(ent->cache_date); // Set time.
|
||||
ent.hit_count = 0;
|
||||
ent.isforce = forcedir;
|
||||
ent.noobjcache = false;
|
||||
ent.notruncate = (no_truncate ? 1L : 0L);
|
||||
ent.meta.clear();
|
||||
SetStatCacheTime(ent.cache_date); // Set time.
|
||||
//copy only some keys
|
||||
for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
|
||||
for(headers_t::const_iterator iter = meta.begin(); iter != meta.end(); ++iter){
|
||||
std::string tag = lower(iter->first);
|
||||
std::string value = iter->second;
|
||||
if(tag == "content-type"){
|
||||
ent->meta[iter->first] = value;
|
||||
ent.meta[iter->first] = value;
|
||||
}else if(tag == "content-length"){
|
||||
ent->meta[iter->first] = value;
|
||||
ent.meta[iter->first] = value;
|
||||
}else if(tag == "etag"){
|
||||
ent->meta[iter->first] = value;
|
||||
ent.meta[iter->first] = value;
|
||||
}else if(tag == "last-modified"){
|
||||
ent->meta[iter->first] = value;
|
||||
ent.meta[iter->first] = value;
|
||||
}else if(is_prefix(tag.c_str(), "x-amz")){
|
||||
ent->meta[tag] = value; // key is lower case for "x-amz"
|
||||
ent.meta[tag] = value; // key is lower case for "x-amz"
|
||||
}
|
||||
}
|
||||
|
||||
// add
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
std::pair<stat_cache_t::iterator, bool> pair = stat_cache.insert(std::make_pair(key, ent));
|
||||
if(!pair.second){
|
||||
delete pair.first->second;
|
||||
pair.first->second = ent;
|
||||
}
|
||||
const auto& value = stat_cache[key] = std::move(ent);
|
||||
|
||||
// check symbolic link cache
|
||||
if(!S_ISLNK(ent->stbuf.st_mode)){
|
||||
if(!S_ISLNK(value.stbuf.st_mode)){
|
||||
if(symlink_cache.end() != symlink_cache.find(key)){
|
||||
// if symbolic link cache has key, thus remove it.
|
||||
DelSymlink(key.c_str(), true);
|
||||
DelSymlink(key.c_str(), AutoLock::ALREADY_LOCKED);
|
||||
}
|
||||
}
|
||||
|
||||
// If no_truncate flag is set, set file name to notruncate_file_cache
|
||||
//
|
||||
if(no_truncate){
|
||||
AddNotruncateCache(key);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -412,7 +405,10 @@ bool StatCache::AddStat(const std::string& key, headers_t& meta, bool forcedir,
|
||||
// Updates only meta data if cached data exists.
|
||||
// And when these are updated, it also updates the cache time.
|
||||
//
|
||||
bool StatCache::UpdateMetaStats(const std::string& key, headers_t& meta)
|
||||
// Since the file mode may change while the file is open, it is
|
||||
// updated as well.
|
||||
//
|
||||
bool StatCache::UpdateMetaStats(const std::string& key, const headers_t& meta)
|
||||
{
|
||||
if(CacheSize < 1){
|
||||
return true;
|
||||
@ -421,13 +417,13 @@ bool StatCache::UpdateMetaStats(const std::string& key, headers_t& meta)
|
||||
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
stat_cache_t::iterator iter = stat_cache.find(key);
|
||||
if(stat_cache.end() == iter || !(iter->second)){
|
||||
if(stat_cache.end() == iter){
|
||||
return true;
|
||||
}
|
||||
stat_cache_entry* ent = iter->second;
|
||||
stat_cache_entry* ent = &iter->second;
|
||||
|
||||
// update only meta keys
|
||||
for(headers_t::iterator metaiter = meta.begin(); metaiter != meta.end(); ++metaiter){
|
||||
for(headers_t::const_iterator metaiter = meta.begin(); metaiter != meta.end(); ++metaiter){
|
||||
std::string tag = lower(metaiter->first);
|
||||
std::string value = metaiter->second;
|
||||
if(tag == "content-type"){
|
||||
@ -446,6 +442,9 @@ bool StatCache::UpdateMetaStats(const std::string& key, headers_t& meta)
|
||||
// Update time.
|
||||
SetStatCacheTime(ent->cache_date);
|
||||
|
||||
// Update only mode
|
||||
ent->stbuf.st_mode = get_mode(meta, key);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -459,47 +458,38 @@ bool StatCache::AddNoObjectCache(const std::string& key)
|
||||
}
|
||||
S3FS_PRN_INFO3("add no object cache entry[path=%s]", key.c_str());
|
||||
|
||||
bool found;
|
||||
bool do_truncate;
|
||||
{
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
found = stat_cache.end() != stat_cache.find(key);
|
||||
do_truncate = stat_cache.size() > CacheSize;
|
||||
}
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
if(found){
|
||||
DelStat(key.c_str());
|
||||
if(stat_cache.end() != stat_cache.find(key)){
|
||||
// found
|
||||
DelStat(key.c_str(), AutoLock::ALREADY_LOCKED);
|
||||
}else{
|
||||
if(do_truncate){
|
||||
if(!TruncateCache()){
|
||||
// check: need to truncate cache
|
||||
if(stat_cache.size() > CacheSize){
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(!TruncateCache(AutoLock::ALREADY_LOCKED)){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// make new
|
||||
stat_cache_entry* ent = new stat_cache_entry();
|
||||
memset(&(ent->stbuf), 0, sizeof(struct stat));
|
||||
ent->hit_count = 0;
|
||||
ent->isforce = false;
|
||||
ent->noobjcache = true;
|
||||
ent->notruncate = 0L;
|
||||
ent->meta.clear();
|
||||
SetStatCacheTime(ent->cache_date); // Set time.
|
||||
stat_cache_entry ent;
|
||||
memset(&ent.stbuf, 0, sizeof(struct stat));
|
||||
ent.hit_count = 0;
|
||||
ent.isforce = false;
|
||||
ent.noobjcache = true;
|
||||
ent.notruncate = 0L;
|
||||
ent.meta.clear();
|
||||
SetStatCacheTime(ent.cache_date); // Set time.
|
||||
|
||||
// add
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
std::pair<stat_cache_t::iterator, bool> pair = stat_cache.insert(std::make_pair(key, ent));
|
||||
if(!pair.second){
|
||||
delete pair.first->second;
|
||||
pair.first->second = ent;
|
||||
}
|
||||
stat_cache[key] = std::move(ent);
|
||||
|
||||
// check symbolic link cache
|
||||
if(symlink_cache.end() != symlink_cache.find(key)){
|
||||
// if symbolic link cache has key, thus remove it.
|
||||
DelSymlink(key.c_str(), true);
|
||||
DelSymlink(key.c_str(), AutoLock::ALREADY_LOCKED);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
@ -510,22 +500,28 @@ void StatCache::ChangeNoTruncateFlag(const std::string& key, bool no_truncate)
|
||||
stat_cache_t::iterator iter = stat_cache.find(key);
|
||||
|
||||
if(stat_cache.end() != iter){
|
||||
stat_cache_entry* ent = iter->second;
|
||||
if(ent){
|
||||
if(no_truncate){
|
||||
++(ent->notruncate);
|
||||
}else{
|
||||
if(0L < ent->notruncate){
|
||||
--(ent->notruncate);
|
||||
stat_cache_entry* ent = &iter->second;
|
||||
if(no_truncate){
|
||||
if(0L == ent->notruncate){
|
||||
// need to add no truncate cache.
|
||||
AddNotruncateCache(key);
|
||||
}
|
||||
++(ent->notruncate);
|
||||
}else{
|
||||
if(0L < ent->notruncate){
|
||||
--(ent->notruncate);
|
||||
if(0L == ent->notruncate){
|
||||
// need to delete from no truncate cache.
|
||||
DelNotruncateCache(key);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool StatCache::TruncateCache()
|
||||
bool StatCache::TruncateCache(AutoLock::Type locktype)
|
||||
{
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
AutoLock lock(&StatCache::stat_cache_lock, locktype);
|
||||
|
||||
if(stat_cache.empty()){
|
||||
return true;
|
||||
@ -534,10 +530,9 @@ bool StatCache::TruncateCache()
|
||||
// 1) erase over expire time
|
||||
if(IsExpireTime){
|
||||
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ){
|
||||
stat_cache_entry* entry = iter->second;
|
||||
if(!entry || (0L == entry->notruncate && IsExpireStatCacheTime(entry->cache_date, ExpireTime))){
|
||||
delete entry;
|
||||
stat_cache.erase(iter++);
|
||||
const stat_cache_entry* entry = &iter->second;
|
||||
if(0L == entry->notruncate && IsExpireStatCacheTime(entry->cache_date, ExpireTime)){
|
||||
iter = stat_cache.erase(iter);
|
||||
}else{
|
||||
++iter;
|
||||
}
|
||||
@ -554,8 +549,8 @@ bool StatCache::TruncateCache()
|
||||
statiterlist_t erase_iters;
|
||||
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end() && 0 < erase_count; ++iter){
|
||||
// check no truncate
|
||||
stat_cache_entry* ent = iter->second;
|
||||
if(ent && 0L < ent->notruncate){
|
||||
const stat_cache_entry* ent = &iter->second;
|
||||
if(0L < ent->notruncate){
|
||||
// skip for no truncate entry and keep extra counts for this entity.
|
||||
if(0 < erase_count){
|
||||
--erase_count; // decrement
|
||||
@ -565,7 +560,7 @@ bool StatCache::TruncateCache()
|
||||
erase_iters.push_back(iter);
|
||||
}
|
||||
if(erase_count < erase_iters.size()){
|
||||
sort(erase_iters.begin(), erase_iters.end(), sort_statiterlist());
|
||||
std::sort(erase_iters.begin(), erase_iters.end(), sort_statiterlist());
|
||||
while(erase_count < erase_iters.size()){
|
||||
erase_iters.pop_back();
|
||||
}
|
||||
@ -575,7 +570,6 @@ bool StatCache::TruncateCache()
|
||||
stat_cache_t::iterator siter = *iiter;
|
||||
|
||||
S3FS_PRN_DBG("truncate stat cache[path=%s]", siter->first.c_str());
|
||||
delete siter->second;
|
||||
stat_cache.erase(siter);
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
@ -583,19 +577,19 @@ bool StatCache::TruncateCache()
|
||||
return true;
|
||||
}
|
||||
|
||||
bool StatCache::DelStat(const char* key, bool lock_already_held)
|
||||
bool StatCache::DelStat(const char* key, AutoLock::Type locktype)
|
||||
{
|
||||
if(!key){
|
||||
return false;
|
||||
}
|
||||
S3FS_PRN_INFO3("delete stat cache entry[path=%s]", key);
|
||||
|
||||
AutoLock lock(&StatCache::stat_cache_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE);
|
||||
AutoLock lock(&StatCache::stat_cache_lock, locktype);
|
||||
|
||||
stat_cache_t::iterator iter;
|
||||
if(stat_cache.end() != (iter = stat_cache.find(std::string(key)))){
|
||||
delete (*iter).second;
|
||||
if(stat_cache.end() != (iter = stat_cache.find(key))){
|
||||
stat_cache.erase(iter);
|
||||
DelNotruncateCache(key);
|
||||
}
|
||||
if(0 < strlen(key) && 0 != strcmp(key, "/")){
|
||||
std::string strpath = key;
|
||||
@ -607,8 +601,8 @@ bool StatCache::DelStat(const char* key, bool lock_already_held)
|
||||
strpath += "/";
|
||||
}
|
||||
if(stat_cache.end() != (iter = stat_cache.find(strpath))){
|
||||
delete (*iter).second;
|
||||
stat_cache.erase(iter);
|
||||
DelNotruncateCache(strpath);
|
||||
}
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
@ -624,8 +618,8 @@ bool StatCache::GetSymlink(const std::string& key, std::string& value)
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
symlink_cache_t::iterator iter = symlink_cache.find(strpath);
|
||||
if(iter != symlink_cache.end() && iter->second){
|
||||
symlink_cache_entry* ent = iter->second;
|
||||
if(iter != symlink_cache.end()){
|
||||
symlink_cache_entry* ent = &iter->second;
|
||||
if(!IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){ // use the same as Stats
|
||||
// found
|
||||
S3FS_PRN_DBG("symbolic link cache hit [path=%s][time=%lld.%09ld][hit count=%lu]",
|
||||
@ -645,7 +639,7 @@ bool StatCache::GetSymlink(const std::string& key, std::string& value)
|
||||
}
|
||||
|
||||
if(is_delete_cache){
|
||||
DelSymlink(strpath.c_str(), /*lock_already_held=*/ true);
|
||||
DelSymlink(strpath.c_str(), AutoLock::ALREADY_LOCKED);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
@ -657,45 +651,36 @@ bool StatCache::AddSymlink(const std::string& key, const std::string& value)
|
||||
}
|
||||
S3FS_PRN_INFO3("add symbolic link cache entry[path=%s, value=%s]", key.c_str(), value.c_str());
|
||||
|
||||
bool found;
|
||||
bool do_truncate;
|
||||
{
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
found = symlink_cache.end() != symlink_cache.find(key);
|
||||
do_truncate = symlink_cache.size() > CacheSize;
|
||||
}
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
if(found){
|
||||
DelSymlink(key.c_str());
|
||||
if(symlink_cache.end() != symlink_cache.find(key)){
|
||||
// found
|
||||
DelSymlink(key.c_str(), AutoLock::ALREADY_LOCKED);
|
||||
}else{
|
||||
if(do_truncate){
|
||||
if(!TruncateSymlink()){
|
||||
// check: need to truncate cache
|
||||
if(symlink_cache.size() > CacheSize){
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(!TruncateSymlink(AutoLock::ALREADY_LOCKED)){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// make new
|
||||
symlink_cache_entry* ent = new symlink_cache_entry();
|
||||
ent->link = value;
|
||||
ent->hit_count = 0;
|
||||
SetStatCacheTime(ent->cache_date); // Set time(use the same as Stats).
|
||||
symlink_cache_entry ent;
|
||||
ent.link = value;
|
||||
ent.hit_count = 0;
|
||||
SetStatCacheTime(ent.cache_date); // Set time(use the same as Stats).
|
||||
|
||||
// add
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
std::pair<symlink_cache_t::iterator, bool> pair = symlink_cache.insert(std::make_pair(key, ent));
|
||||
if(!pair.second){
|
||||
delete pair.first->second;
|
||||
pair.first->second = ent;
|
||||
}
|
||||
symlink_cache[key] = std::move(ent);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool StatCache::TruncateSymlink()
|
||||
bool StatCache::TruncateSymlink(AutoLock::Type locktype)
|
||||
{
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
AutoLock lock(&StatCache::stat_cache_lock, locktype);
|
||||
|
||||
if(symlink_cache.empty()){
|
||||
return true;
|
||||
@ -704,10 +689,9 @@ bool StatCache::TruncateSymlink()
|
||||
// 1) erase over expire time
|
||||
if(IsExpireTime){
|
||||
for(symlink_cache_t::iterator iter = symlink_cache.begin(); iter != symlink_cache.end(); ){
|
||||
symlink_cache_entry* entry = iter->second;
|
||||
if(!entry || IsExpireStatCacheTime(entry->cache_date, ExpireTime)){ // use the same as Stats
|
||||
delete entry;
|
||||
symlink_cache.erase(iter++);
|
||||
const symlink_cache_entry* entry = &iter->second;
|
||||
if(IsExpireStatCacheTime(entry->cache_date, ExpireTime)){ // use the same as Stats
|
||||
iter = symlink_cache.erase(iter);
|
||||
}else{
|
||||
++iter;
|
||||
}
|
||||
@ -733,7 +717,6 @@ bool StatCache::TruncateSymlink()
|
||||
symlink_cache_t::iterator siter = *iiter;
|
||||
|
||||
S3FS_PRN_DBG("truncate symbolic link cache[path=%s]", siter->first.c_str());
|
||||
delete siter->second;
|
||||
symlink_cache.erase(siter);
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
@ -741,18 +724,17 @@ bool StatCache::TruncateSymlink()
|
||||
return true;
|
||||
}
|
||||
|
||||
bool StatCache::DelSymlink(const char* key, bool lock_already_held)
|
||||
bool StatCache::DelSymlink(const char* key, AutoLock::Type locktype)
|
||||
{
|
||||
if(!key){
|
||||
return false;
|
||||
}
|
||||
S3FS_PRN_INFO3("delete symbolic link cache entry[path=%s]", key);
|
||||
|
||||
AutoLock lock(&StatCache::stat_cache_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE);
|
||||
AutoLock lock(&StatCache::stat_cache_lock, locktype);
|
||||
|
||||
symlink_cache_t::iterator iter;
|
||||
if(symlink_cache.end() != (iter = symlink_cache.find(std::string(key)))){
|
||||
delete iter->second;
|
||||
if(symlink_cache.end() != (iter = symlink_cache.find(key))){
|
||||
symlink_cache.erase(iter);
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
@ -760,6 +742,116 @@ bool StatCache::DelSymlink(const char* key, bool lock_already_held)
|
||||
return true;
|
||||
}
|
||||
|
||||
// [NOTE]
|
||||
// Need to lock StatCache::stat_cache_lock before calling this method.
|
||||
//
|
||||
bool StatCache::AddNotruncateCache(const std::string& key)
|
||||
{
|
||||
if(key.empty() || '/' == *key.rbegin()){
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string parentdir = mydirname(key);
|
||||
std::string filename = mybasename(key);
|
||||
if(parentdir.empty() || filename.empty()){
|
||||
return false;
|
||||
}
|
||||
parentdir += '/'; // directory path must be '/' termination.
|
||||
|
||||
notruncate_dir_map_t::iterator iter = notruncate_file_cache.find(parentdir);
|
||||
if(iter == notruncate_file_cache.end()){
|
||||
// add new list
|
||||
notruncate_filelist_t list;
|
||||
list.push_back(filename);
|
||||
notruncate_file_cache[parentdir] = list;
|
||||
}else{
|
||||
// add filename to existed list
|
||||
notruncate_filelist_t& filelist = iter->second;
|
||||
notruncate_filelist_t::const_iterator fiter = std::find(filelist.begin(), filelist.end(), filename);
|
||||
if(fiter == filelist.end()){
|
||||
filelist.push_back(filename);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// [NOTE]
|
||||
// Need to lock StatCache::stat_cache_lock before calling this method.
|
||||
//
|
||||
bool StatCache::DelNotruncateCache(const std::string& key)
|
||||
{
|
||||
if(key.empty() || '/' == *key.rbegin()){
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string parentdir = mydirname(key);
|
||||
std::string filename = mybasename(key);
|
||||
if(parentdir.empty() || filename.empty()){
|
||||
return false;
|
||||
}
|
||||
parentdir += '/'; // directory path must be '/' termination.
|
||||
|
||||
notruncate_dir_map_t::iterator iter = notruncate_file_cache.find(parentdir);
|
||||
if(iter != notruncate_file_cache.end()){
|
||||
// found directory in map
|
||||
notruncate_filelist_t& filelist = iter->second;
|
||||
notruncate_filelist_t::iterator fiter = std::find(filelist.begin(), filelist.end(), filename);
|
||||
if(fiter != filelist.end()){
|
||||
// found filename in directory file list
|
||||
filelist.erase(fiter);
|
||||
if(filelist.empty()){
|
||||
notruncate_file_cache.erase(parentdir);
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// [Background]
|
||||
// When s3fs creates a new file, the file does not exist until the file contents
|
||||
// are uploaded.(because it doesn't create a 0 byte file)
|
||||
// From the time this file is created(opened) until it is uploaded(flush), it
|
||||
// will have a Stat cache with the No truncate flag added.
|
||||
// This avoids file not existing errors in operations such as chmod and utimens
|
||||
// that occur in the short period before file upload.
|
||||
// Besides this, we also need to support readdir(list_bucket), this method is
|
||||
// called to maintain the cache for readdir and return its value.
|
||||
//
|
||||
// [NOTE]
|
||||
// Add the file names under parentdir to the list.
|
||||
// However, if the same file name exists in the list, it will not be added.
|
||||
// parentdir must be terminated with a '/'.
|
||||
//
|
||||
bool StatCache::GetNotruncateCache(const std::string& parentdir, notruncate_filelist_t& list)
|
||||
{
|
||||
if(parentdir.empty()){
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string dirpath = parentdir;
|
||||
if('/' != *dirpath.rbegin()){
|
||||
dirpath += '/';
|
||||
}
|
||||
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
notruncate_dir_map_t::iterator iter = notruncate_file_cache.find(dirpath);
|
||||
if(iter == notruncate_file_cache.end()){
|
||||
// not found directory map
|
||||
return true;
|
||||
}
|
||||
|
||||
// found directory in map
|
||||
const notruncate_filelist_t& filelist = iter->second;
|
||||
for(notruncate_filelist_t::const_iterator fiter = filelist.begin(); fiter != filelist.end(); ++fiter){
|
||||
if(list.end() == std::find(list.begin(), list.end(), *fiter)){
|
||||
// found notuncate file that does not exist in the list, so add it.
|
||||
list.push_back(*fiter);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
@ -790,13 +882,7 @@ bool convert_header_to_stat(const char* path, const headers_t& meta, struct stat
|
||||
mtime.tv_sec = 0;
|
||||
mtime.tv_nsec = 0;
|
||||
}
|
||||
#if defined(__APPLE__)
|
||||
pst->st_mtime = mtime.tv_sec;
|
||||
pst->st_mtimespec.tv_nsec = mtime.tv_nsec;
|
||||
#else
|
||||
pst->st_mtim.tv_sec = mtime.tv_sec;
|
||||
pst->st_mtim.tv_nsec = mtime.tv_nsec;
|
||||
#endif
|
||||
set_timespec_to_stat(*pst, stat_time_type::MTIME, mtime);
|
||||
}
|
||||
|
||||
// ctime
|
||||
@ -808,13 +894,7 @@ bool convert_header_to_stat(const char* path, const headers_t& meta, struct stat
|
||||
ctime.tv_sec = 0;
|
||||
ctime.tv_nsec = 0;
|
||||
}
|
||||
#if defined(__APPLE__)
|
||||
pst->st_ctime = ctime.tv_sec;
|
||||
pst->st_ctimespec.tv_nsec = ctime.tv_nsec;
|
||||
#else
|
||||
pst->st_ctim.tv_sec = ctime.tv_sec;
|
||||
pst->st_ctim.tv_nsec = ctime.tv_nsec;
|
||||
#endif
|
||||
set_timespec_to_stat(*pst, stat_time_type::CTIME, ctime);
|
||||
}
|
||||
|
||||
// atime
|
||||
@ -826,17 +906,15 @@ bool convert_header_to_stat(const char* path, const headers_t& meta, struct stat
|
||||
atime.tv_sec = 0;
|
||||
atime.tv_nsec = 0;
|
||||
}
|
||||
#if defined(__APPLE__)
|
||||
pst->st_atime = atime.tv_sec;
|
||||
pst->st_atimespec.tv_nsec = atime.tv_nsec;
|
||||
#else
|
||||
pst->st_atim.tv_sec = atime.tv_sec;
|
||||
pst->st_atim.tv_nsec = atime.tv_nsec;
|
||||
#endif
|
||||
set_timespec_to_stat(*pst, stat_time_type::ATIME, atime);
|
||||
}
|
||||
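The three removed #if defined(__APPLE__) blocks above are folded into a single helper call. Based only on what those removed lines did, a helper with the following shape would be equivalent; this is a sketch, not necessarily the actual definition of set_timespec_to_stat in s3fs:

// Illustration only: one place for the platform-specific timespec fields of struct stat.
#include <cstdio>
#include <ctime>
#include <sys/stat.h>

enum class stat_time_type{ ATIME, MTIME, CTIME };

static void set_timespec_to_stat(struct stat& st, stat_time_type type, const struct timespec& ts)
{
#if defined(__APPLE__)
    switch(type){
        case stat_time_type::MTIME: st.st_mtime = ts.tv_sec; st.st_mtimespec.tv_nsec = ts.tv_nsec; break;
        case stat_time_type::CTIME: st.st_ctime = ts.tv_sec; st.st_ctimespec.tv_nsec = ts.tv_nsec; break;
        case stat_time_type::ATIME: st.st_atime = ts.tv_sec; st.st_atimespec.tv_nsec = ts.tv_nsec; break;
    }
#else
    switch(type){
        case stat_time_type::MTIME: st.st_mtim = ts; break;
        case stat_time_type::CTIME: st.st_ctim = ts; break;
        case stat_time_type::ATIME: st.st_atim = ts; break;
    }
#endif
}

int main()
{
    struct stat st{};
    struct timespec ts{ 1700000000, 123456789 };
    set_timespec_to_stat(st, stat_time_type::MTIME, ts);
    std::printf("mtime=%lld\n", (long long)st.st_mtime);
    return 0;
}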
|
||||
// size
|
||||
pst->st_size = get_size(meta);
|
||||
if(S_ISDIR(pst->st_mode)){
|
||||
pst->st_size = 4096;
|
||||
}else{
|
||||
pst->st_size = get_size(meta);
|
||||
}
|
||||
|
||||
// uid/gid
|
||||
pst->st_uid = get_uid(meta);
|
||||
|
||||
52
src/cache.h
52
src/cache.h
@ -21,6 +21,9 @@
|
||||
#ifndef S3FS_CACHE_H_
|
||||
#define S3FS_CACHE_H_
|
||||
|
||||
#include <cstring>
|
||||
|
||||
#include "autolock.h"
|
||||
#include "metaheader.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -47,7 +50,7 @@ struct stat_cache_entry {
|
||||
}
|
||||
};
|
||||
|
||||
typedef std::map<std::string, stat_cache_entry*> stat_cache_t; // key=path
|
||||
typedef std::map<std::string, stat_cache_entry> stat_cache_t; // key=path
|
||||
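The typedef change above is the key to most of the cache.cpp simplifications earlier in this diff: once the map stores stat_cache_entry by value, erasing an element destroys it, so the delete loops disappear and new entries can simply be moved in. A small generic sketch of that pattern (not s3fs code):

// Illustration only: a value-typed cache map needs no manual delete on erase.
#include <cstdio>
#include <map>
#include <string>
#include <utility>

struct entry{
    std::string etag;
    unsigned long hit_count = 0;
};

int main()
{
    std::map<std::string, entry> cache;

    entry ent;
    ent.etag = "\"abc123\"";
    cache["/dir/file"] = std::move(ent);     // move the new entry into the map

    cache.erase("/dir/file");                // the entry's destructor runs automatically
    std::printf("cache size is now %zu\n", cache.size());
    return 0;
}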
|
||||
//
|
||||
// Struct for symbolic link cache
|
||||
@ -64,7 +67,13 @@ struct symlink_cache_entry {
|
||||
}
|
||||
};
|
||||
|
||||
typedef std::map<std::string, symlink_cache_entry*> symlink_cache_t;
|
||||
typedef std::map<std::string, symlink_cache_entry> symlink_cache_t;
|
||||
|
||||
//
|
||||
// Typedefs for No truncate file name cache
|
||||
//
|
||||
typedef std::vector<std::string> notruncate_filelist_t; // untruncated file name list in dir
|
||||
typedef std::map<std::string, notruncate_filelist_t> notruncate_dir_map_t; // key is parent dir path
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class StatCache
|
||||
@ -90,6 +99,7 @@ class StatCache
|
||||
unsigned long CacheSize;
|
||||
bool IsCacheNoObject;
|
||||
symlink_cache_t symlink_cache;
|
||||
notruncate_dir_map_t notruncate_file_cache;
|
||||
|
||||
private:
|
||||
StatCache();
|
||||
@ -98,9 +108,12 @@ class StatCache
|
||||
void Clear();
|
||||
bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce);
|
||||
// Truncate stat cache
|
||||
bool TruncateCache();
|
||||
bool TruncateCache(AutoLock::Type locktype = AutoLock::NONE);
|
||||
// Truncate symbolic link cache
|
||||
bool TruncateSymlink();
|
||||
bool TruncateSymlink(AutoLock::Type locktype = AutoLock::NONE);
|
||||
|
||||
bool AddNotruncateCache(const std::string& key);
|
||||
bool DelNotruncateCache(const std::string& key);
|
||||
|
||||
public:
|
||||
// Reference singleton
|
||||
@ -130,25 +143,29 @@ class StatCache
|
||||
}
|
||||
|
||||
// Get stat cache
|
||||
bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck = true, bool* pisforce = NULL)
|
||||
bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck = true, bool* pisforce = nullptr)
|
||||
{
|
||||
return GetStat(key, pst, meta, overcheck, NULL, pisforce);
|
||||
return GetStat(key, pst, meta, overcheck, nullptr, pisforce);
|
||||
}
|
||||
bool GetStat(const std::string& key, struct stat* pst, bool overcheck = true)
|
||||
{
|
||||
return GetStat(key, pst, NULL, overcheck, NULL, NULL);
|
||||
return GetStat(key, pst, nullptr, overcheck, nullptr, nullptr);
|
||||
}
|
||||
bool GetStat(const std::string& key, headers_t* meta, bool overcheck = true)
|
||||
{
|
||||
return GetStat(key, NULL, meta, overcheck, NULL, NULL);
|
||||
return GetStat(key, nullptr, meta, overcheck, nullptr, nullptr);
|
||||
}
|
||||
bool HasStat(const std::string& key, bool overcheck = true)
|
||||
{
|
||||
return GetStat(key, NULL, NULL, overcheck, NULL, NULL);
|
||||
return GetStat(key, nullptr, nullptr, overcheck, nullptr, nullptr);
|
||||
}
|
||||
bool HasStat(const std::string& key, const char* etag, bool overcheck = true)
|
||||
{
|
||||
return GetStat(key, NULL, NULL, overcheck, etag, NULL);
|
||||
return GetStat(key, nullptr, nullptr, overcheck, etag, nullptr);
|
||||
}
|
||||
bool HasStat(const std::string& key, struct stat* pst, const char* etag)
|
||||
{
|
||||
return GetStat(key, pst, nullptr, true, etag, nullptr);
|
||||
}
|
||||
|
||||
// Cache For no object
|
||||
@ -156,25 +173,28 @@ class StatCache
|
||||
bool AddNoObjectCache(const std::string& key);
|
||||
|
||||
// Add stat cache
|
||||
bool AddStat(const std::string& key, headers_t& meta, bool forcedir = false, bool no_truncate = false);
|
||||
bool AddStat(const std::string& key, const headers_t& meta, bool forcedir = false, bool no_truncate = false);
|
||||
|
||||
// Update meta stats
|
||||
bool UpdateMetaStats(const std::string& key, headers_t& meta);
|
||||
bool UpdateMetaStats(const std::string& key, const headers_t& meta);
|
||||
|
||||
// Change no truncate flag
|
||||
void ChangeNoTruncateFlag(const std::string& key, bool no_truncate);
|
||||
|
||||
// Delete stat cache
|
||||
bool DelStat(const char* key, bool lock_already_held = false);
|
||||
bool DelStat(const std::string& key, bool lock_already_held = false)
|
||||
bool DelStat(const char* key, AutoLock::Type locktype = AutoLock::NONE);
|
||||
bool DelStat(const std::string& key, AutoLock::Type locktype = AutoLock::NONE)
|
||||
{
|
||||
return DelStat(key.c_str(), lock_already_held);
|
||||
return DelStat(key.c_str(), locktype);
|
||||
}
|
||||
|
||||
// Cache for symbolic link
|
||||
bool GetSymlink(const std::string& key, std::string& value);
|
||||
bool AddSymlink(const std::string& key, const std::string& value);
|
||||
bool DelSymlink(const char* key, bool lock_already_held = false);
|
||||
bool DelSymlink(const char* key, AutoLock::Type locktype = AutoLock::NONE);
|
||||
|
||||
// Cache for Notruncate file
|
||||
bool GetNotruncateCache(const std::string& parentdir, notruncate_filelist_t& list);
|
||||
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
|
||||
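The cache.h hunks above switch the stat and symlink caches from maps of raw pointers to maps of values, so cache entries no longer have to be deleted by hand. A minimal sketch of the difference, using a simplified stand-in for the real entry struct (assumed, not taken from the repository):

    #include <map>
    #include <string>

    struct entry { std::string etag; };        // simplified stand-in for stat_cache_entry

    std::map<std::string, entry*> old_cache;   // old style: caller owns and must delete values
    std::map<std::string, entry>  new_cache;   // new style: erase()/clear() releases storage

    void drop(const std::string& key)
    {
        // old style would need: delete old_cache[key]; old_cache.erase(key);
        new_cache.erase(key);                  // no explicit delete needed with value semantics
    }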
src/common.h (12 changed lines)
@@ -21,18 +21,17 @@
#ifndef S3FS_COMMON_H_
#define S3FS_COMMON_H_

#include <fcntl.h>
#include <sys/types.h>

#include "../config.h"
#include "types.h"
#include "s3fs_logger.h"

//-------------------------------------------------------------------
// Global variables
//-------------------------------------------------------------------
// TODO: namespace these
static const int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL;
static const off_t MIN_MULTIPART_SIZE = 5 * 1024 * 1024;
static constexpr int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL;
static constexpr off_t MIN_MULTIPART_SIZE = 5 * 1024 * 1024;

extern bool foreground;
extern bool nomultipart;
@@ -47,6 +46,11 @@ extern std::string endpoint;
extern std::string cipher_suites;
extern std::string instance_name;

//-------------------------------------------------------------------
// For weak attribute
//-------------------------------------------------------------------
#define S3FS_FUNCATTR_WEAK __attribute__ ((weak,unused))

#endif // S3FS_COMMON_H_

/*
@@ -18,14 +18,9 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include <climits>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>

#include "common.h"
#include "s3fs.h"
#include "s3fs_auth.h"
#include "string_util.h"

@@ -34,36 +29,24 @@
//-------------------------------------------------------------------
std::string s3fs_get_content_md5(int fd)
{
unsigned char* md5;
char* base64;
std::string Signature;

if(NULL == (md5 = s3fs_md5_fd(fd, 0, -1))){
return std::string("");
md5_t md5;
if(!s3fs_md5_fd(fd, 0, -1, &md5)){
// TODO: better return value?
return "";
}
if(NULL == (base64 = s3fs_base64(md5, get_md5_digest_length()))){
delete[] md5;
return std::string(""); // ENOMEM
}
delete[] md5;

Signature = base64;
delete[] base64;

return Signature;
return s3fs_base64(md5.data(), md5.size());
}

std::string s3fs_sha256_hex_fd(int fd, off_t start, off_t size)
{
size_t digestlen = get_sha256_digest_length();
unsigned char* sha256;
sha256_t sha256;

if(NULL == (sha256 = s3fs_sha256_fd(fd, start, size))){
return std::string("");
if(!s3fs_sha256_fd(fd, start, size, &sha256)){
// TODO: better return value?
return "";
}

std::string sha256hex = s3fs_hex_lower(sha256, digestlen);
delete[] sha256;
std::string sha256hex = s3fs_hex_lower(sha256.data(), sha256.size());

return sha256hex;
}
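The auth helpers above stop returning heap-allocated `unsigned char*` digests and instead fill fixed-size digest types (`md5_t`, `sha256_t`) through an out-parameter. A sketch of the pattern, assuming these types are `std::array` aliases; the prototypes below only model the calls shown in the diff:

    #include <array>
    #include <string>
    #include <sys/types.h>

    typedef std::array<unsigned char, 16> md5_t;             // assumed alias sized for an MD5 digest

    // assumed prototypes modeled on the diff: fill *result, return false on failure
    bool s3fs_md5_fd_sketch(int fd, off_t start, off_t size, md5_t* result);
    std::string s3fs_base64_sketch(const unsigned char* input, size_t length);

    std::string content_md5_sketch(int fd)
    {
        md5_t md5;
        if(!s3fs_md5_fd_sketch(fd, 0, -1, &md5)){
            return "";                                       // no delete[] needed on the error path
        }
        return s3fs_base64_sketch(md5.data(), md5.size());   // fixed-size buffer, no manual cleanup
    }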
src/curl.cpp (1180 changed lines): file diff suppressed because it is too large
src/curl.h (111 changed lines)
@@ -21,20 +21,14 @@
#ifndef S3FS_CURL_H_
#define S3FS_CURL_H_

#include <cassert>
#include <curl/curl.h>
#include <list>
#include <map>
#include <strings.h>
#include <memory>
#include <vector>

#include "common.h"
#include "curl_handlerpool.h"
#include "bodydata.h"
#include "psemaphore.h"
#include "autolock.h"
#include "metaheader.h"
#include "fdcache_page.h"
#include "s3fs_cred.h"

//----------------------------------------------
// Avoid dependency on libcurl version
@@ -45,7 +39,7 @@
// CURLOPT_SSL_ENABLE_ALPN 7.36.0 and later
// CURLOPT_KEEP_SENDING_ON_ERROR 7.51.0 and later
//
// s3fs uses these, if you build s3fs with the old libcurl,
// s3fs uses these, if you build s3fs with the old libcurl,
// substitute the following symbols to avoid errors.
// If the version of libcurl linked at runtime is old,
// curl_easy_setopt results in an error(CURLE_UNKNOWN_OPTION) and
@@ -79,13 +73,16 @@ typedef std::map<CURL*, progress_t> curlprogress_t;
//----------------------------------------------
// class S3fsCurl
//----------------------------------------------
class CurlHandlerPool;
class S3fsCred;
class S3fsCurl;
class Semaphore;

// Prototype function for lazy setup options for curl handle
typedef bool (*s3fscurl_lazy_setup)(S3fsCurl* s3fscurl);

typedef std::map<std::string, std::string> sseckeymap_t;
typedef std::list<sseckeymap_t> sseckeylist_t;
typedef std::vector<sseckeymap_t> sseckeylist_t;

// Class for lapping curl
//
@@ -94,23 +91,23 @@ class S3fsCurl
friend class S3fsMultiCurl;

private:
enum REQTYPE {
REQTYPE_UNSET = -1,
REQTYPE_DELETE = 0,
REQTYPE_HEAD,
REQTYPE_PUTHEAD,
REQTYPE_PUT,
REQTYPE_GET,
REQTYPE_CHKBUCKET,
REQTYPE_LISTBUCKET,
REQTYPE_PREMULTIPOST,
REQTYPE_COMPLETEMULTIPOST,
REQTYPE_UPLOADMULTIPOST,
REQTYPE_COPYMULTIPOST,
REQTYPE_MULTILIST,
REQTYPE_IAMCRED,
REQTYPE_ABORTMULTIUPLOAD,
REQTYPE_IAMROLE
enum class REQTYPE {
UNSET = -1,
DELETE = 0,
HEAD,
PUTHEAD,
PUT,
GET,
CHKBUCKET,
LISTBUCKET,
PREMULTIPOST,
COMPLETEMULTIPOST,
UPLOADMULTIPOST,
COPYMULTIPOST,
MULTILIST,
IAMCRED,
ABORTMULTIUPLOAD,
IAMROLE
};

// class variables
@@ -156,6 +153,9 @@ class S3fsCurl
static bool is_ua; // User-Agent
static bool listobjectsv2;
static bool requester_pays;
static std::string proxy_url;
static bool proxy_http;
static std::string proxy_userpwd; // load from file(<username>:<passphrase>)

// variables
CURL* hCurl;
@@ -166,8 +166,8 @@ class S3fsCurl
std::string url; // target object path(url)
struct curl_slist* requestHeaders;
headers_t responseHeaders; // header data by HeaderCallback
BodyData bodydata; // body data by WriteMemoryCallback
BodyData headdata; // header data by WriteMemoryCallback
std::string bodydata; // body data by WriteMemoryCallback
std::string headdata; // header data by WriteMemoryCallback
long LastResponseCode;
const unsigned char* postdata; // use by post method and read callback function.
off_t postdata_remaining; // use by post method and read callback function.
@@ -191,16 +191,20 @@ class S3fsCurl
std::vector<pthread_t> *completed_tids;
s3fscurl_lazy_setup fpLazySetup; // curl options for lazy setting function
CURLcode curlCode; // handle curl return

public:
static const long S3FSCURL_RESPONSECODE_NOTSET = -1;
static const long S3FSCURL_RESPONSECODE_FATAL_ERROR = -2;
static const int S3FSCURL_PERFORM_RESULT_NOTSET = 1;
static constexpr long S3FSCURL_RESPONSECODE_NOTSET = -1;
static constexpr long S3FSCURL_RESPONSECODE_FATAL_ERROR = -2;
static constexpr int S3FSCURL_PERFORM_RESULT_NOTSET = 1;

public:
// constructor/destructor
explicit S3fsCurl(bool ahbe = false);
~S3fsCurl();
S3fsCurl(const S3fsCurl&) = delete;
S3fsCurl(S3fsCurl&&) = delete;
S3fsCurl& operator=(const S3fsCurl&) = delete;
S3fsCurl& operator=(S3fsCurl&&) = delete;

private:
// class methods
@@ -221,16 +225,15 @@ class S3fsCurl
static size_t UploadReadCallback(void *ptr, size_t size, size_t nmemb, void *userp);
static size_t DownloadWriteCallback(void* ptr, size_t size, size_t nmemb, void* userp);

static bool UploadMultipartPostCallback(S3fsCurl* s3fscurl);
static bool CopyMultipartPostCallback(S3fsCurl* s3fscurl);
static bool MixMultipartPostCallback(S3fsCurl* s3fscurl);
static S3fsCurl* UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl);
static S3fsCurl* CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl);
static S3fsCurl* MixMultipartPostRetryCallback(S3fsCurl* s3fscurl);
static S3fsCurl* ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl);
static bool UploadMultipartPostCallback(S3fsCurl* s3fscurl, void* param);
static bool CopyMultipartPostCallback(S3fsCurl* s3fscurl, void* param);
static bool MixMultipartPostCallback(S3fsCurl* s3fscurl, void* param);
static std::unique_ptr<S3fsCurl> UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl);
static std::unique_ptr<S3fsCurl> CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl);
static std::unique_ptr<S3fsCurl> MixMultipartPostRetryCallback(S3fsCurl* s3fscurl);
static std::unique_ptr<S3fsCurl> ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl);

// lazy functions for set curl options
static bool UploadMultipartPostSetCurlOpts(S3fsCurl* s3fscurl);
static bool CopyMultipartPostSetCurlOpts(S3fsCurl* s3fscurl);
static bool PreGetObjectRequestSetCurlOpts(S3fsCurl* s3fscurl);
static bool PreHeadRequestSetCurlOpts(S3fsCurl* s3fscurl);
@@ -246,20 +249,20 @@ class S3fsCurl
static int RawCurlDebugFunc(const CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr, curl_infotype datatype);

// methods
bool ResetHandle(bool lock_already_held = false);
bool ResetHandle(AutoLock::Type locktype = AutoLock::NONE);
bool RemakeHandle();
bool ClearInternalData();
void insertV4Headers(const std::string& access_key_id, const std::string& secret_access_key, const std::string& access_token);
void insertV2Headers(const std::string& access_key_id, const std::string& secret_access_key, const std::string& access_token);
void insertIBMIAMHeaders(const std::string& access_key_id, const std::string& access_token);
void insertAuthHeaders();
bool AddSseRequestHead(sse_type_t ssetype, const std::string& ssevalue, bool is_copy);
std::string CalcSignatureV2(const std::string& method, const std::string& strMD5, const std::string& content_type, const std::string& date, const std::string& resource, const std::string& secret_access_key, const std::string& access_token);
std::string CalcSignature(const std::string& method, const std::string& canonical_uri, const std::string& query_string, const std::string& strdate, const std::string& payload_hash, const std::string& date8601, const std::string& secret_access_key, const std::string& access_token);
int UploadMultipartPostSetup(const char* tpath, int part_num, const std::string& upload_id);
int CopyMultipartPostSetup(const char* from, const char* to, int part_num, const std::string& upload_id, headers_t& meta);
bool UploadMultipartPostComplete();
bool CopyMultipartPostComplete();
bool MixMultipartPostComplete();
int MapPutErrorResponse(int result);

public:
@@ -268,10 +271,14 @@
static bool InitCredentialObject(S3fsCred* pcredobj);
static bool InitMimeType(const std::string& strFile);
static bool DestroyS3fsCurl();
static std::unique_ptr<S3fsCurl> CreateParallelS3fsCurl(const char* tpath, int fd, off_t start, off_t size, int part_num, bool is_copy, etagpair* petag, const std::string& upload_id, int& result);
static int ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, int fd);
static int ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const fdpage_list_t& mixuppages);
static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, off_t size);

// lazy functions for set curl options(public)
static bool UploadMultipartPostSetCurlOpts(S3fsCurl* s3fscurl);

// class methods(variables)
static std::string LookupMimeType(const std::string& name);
static bool SetCheckCertificate(bool isCertCheck);
@@ -331,19 +338,20 @@ class S3fsCurl
static bool IsListObjectsV2() { return S3fsCurl::listobjectsv2; }
static bool SetRequesterPays(bool flag) { bool old_flag = S3fsCurl::requester_pays; S3fsCurl::requester_pays = flag; return old_flag; }
static bool IsRequesterPays() { return S3fsCurl::requester_pays; }
static bool SetProxy(const char* url);
static bool SetProxyUserPwd(const char* userpwd);

// methods
bool CreateCurlHandle(bool only_pool = false, bool remake = false);
bool DestroyCurlHandle(bool restore_pool = true, bool clear_internal_data = true);
bool DestroyCurlHandle(bool restore_pool = true, bool clear_internal_data = true, AutoLock::Type locktype = AutoLock::NONE);

bool GetIAMCredentials(const char* cred_url, const char* iam_v2_token, const char* ibm_secret_access_key, std::string& response);
bool GetIAMRoleFromMetaData(const char* cred_url, const char* iam_v2_token, std::string& token);
bool AddSseRequestHead(sse_type_t ssetype, const std::string& ssevalue, bool is_only_c, bool is_copy);
bool GetResponseCode(long& responseCode, bool from_curl_handle = true);
bool GetResponseCode(long& responseCode, bool from_curl_handle = true) const;
int RequestPerform(bool dontAddAuthHeaders=false);
int DeleteRequest(const char* tpath);
int GetIAMv2ApiToken(const char* token_url, int token_ttl, const char* token_ttl_hdr, std::string& response);
bool PreHeadRequest(const char* tpath, const char* bpath = NULL, const char* savedpath = NULL, size_t ssekey_pos = -1);
bool PreHeadRequest(const char* tpath, const char* bpath = nullptr, const char* savedpath = nullptr, size_t ssekey_pos = -1);
bool PreHeadRequest(const std::string& tpath, const std::string& bpath, const std::string& savedpath, size_t ssekey_pos = -1) {
return PreHeadRequest(tpath.c_str(), bpath.c_str(), savedpath.c_str(), ssekey_pos);
}
@@ -352,11 +360,12 @@ class S3fsCurl
int PutRequest(const char* tpath, headers_t& meta, int fd);
int PreGetObjectRequest(const char* tpath, int fd, off_t start, off_t size, sse_type_t ssetype, const std::string& ssevalue);
int GetObjectRequest(const char* tpath, int fd, off_t start = -1, off_t size = -1);
int CheckBucket();
int CheckBucket(const char* check_path, bool compat_dir, bool force_no_sse);
int ListBucketRequest(const char* tpath, const char* query);
int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy);
int CompleteMultipartPostRequest(const char* tpath, const std::string& upload_id, etaglist_t& parts);
int UploadMultipartPostRequest(const char* tpath, int part_num, const std::string& upload_id);
bool MixMultipartPostComplete();
int MultipartListRequest(std::string& body);
int AbortMultipartUpload(const char* tpath, const std::string& upload_id);
int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy);
@@ -370,9 +379,9 @@ class S3fsCurl
std::string GetSpecialSavedPath() const { return saved_path; }
std::string GetUrl() const { return url; }
std::string GetOp() const { return op; }
headers_t* GetResponseHeaders() { return &responseHeaders; }
BodyData* GetBodyData() { return &bodydata; }
BodyData* GetHeadData() { return &headdata; }
const headers_t* GetResponseHeaders() const { return &responseHeaders; }
const std::string* GetBodyData() const { return &bodydata; }
const std::string* GetHeadData() const { return &headdata; }
CURLcode GetCurlCode() const { return curlCode; }
long GetLastResponseCode() const { return LastResponseCode; }
bool SetUseAhbe(bool ahbe);
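Among the curl.h hunks above, the request-type enum becomes a scoped `enum class`, so the constants drop the `REQTYPE_` prefix, must be qualified as `REQTYPE::HEAD` and so on, and no longer convert implicitly to int. A small illustration with a trimmed member list (not the full enum from the header):

    enum class REQTYPE { UNSET = -1, DELETE = 0, HEAD, PUT, GET };

    const char* reqtype_name(REQTYPE type)
    {
        switch(type){                      // the scope must be named; no implicit int conversion
            case REQTYPE::DELETE: return "DELETE";
            case REQTYPE::HEAD:   return "HEAD";
            case REQTYPE::PUT:    return "PUT";
            case REQTYPE::GET:    return "GET";
            default:              return "UNSET";
        }
    }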
@@ -19,10 +19,8 @@
*/

#include <cstdio>
#include <cstdlib>

#include "common.h"
#include "s3fs.h"
#include "s3fs_logger.h"
#include "curl_handlerpool.h"
#include "autolock.h"

@@ -55,11 +53,15 @@ bool CurlHandlerPool::Init()

bool CurlHandlerPool::Destroy()
{
while(!mPool.empty()){
CURL* hCurl = mPool.back();
mPool.pop_back();
if(hCurl){
curl_easy_cleanup(hCurl);
{
AutoLock lock(&mLock);

while(!mPool.empty()){
CURL* hCurl = mPool.back();
mPool.pop_back();
if(hCurl){
curl_easy_cleanup(hCurl);
}
}
}
if (0 != pthread_mutex_destroy(&mLock)) {
@@ -71,15 +73,14 @@ bool CurlHandlerPool::Destroy()

CURL* CurlHandlerPool::GetHandler(bool only_pool)
{
CURL* hCurl = NULL;
{
AutoLock lock(&mLock);

if(!mPool.empty()){
hCurl = mPool.back();
mPool.pop_back();
S3FS_PRN_DBG("Get handler from pool: rest = %d", static_cast<int>(mPool.size()));
}
AutoLock lock(&mLock);

CURL* hCurl = nullptr;

if(!mPool.empty()){
hCurl = mPool.back();
mPool.pop_back();
S3FS_PRN_DBG("Get handler from pool: rest = %d", static_cast<int>(mPool.size()));
}
if(only_pool){
return hCurl;
@@ -96,14 +97,13 @@ void CurlHandlerPool::ReturnHandler(CURL* hCurl, bool restore_pool)
if(!hCurl){
return;
}
AutoLock lock(&mLock);

if(restore_pool){
AutoLock lock(&mLock);

S3FS_PRN_DBG("Return handler to pool");
mPool.push_back(hCurl);

while(mMaxHandlers <= static_cast<int>(mPool.size())){
while(mMaxHandlers < static_cast<int>(mPool.size())){
CURL* hOldCurl = mPool.front();
mPool.pop_front();
if(hOldCurl){
@@ -117,6 +117,16 @@ void CurlHandlerPool::ReturnHandler(CURL* hCurl, bool restore_pool)
}
}

void CurlHandlerPool::ResetHandler(CURL* hCurl)
{
if(!hCurl){
return;
}
AutoLock lock(&mLock);

curl_easy_reset(hCurl);
}

/*
* Local variables:
* tab-width: 4
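The CurlHandlerPool hunks above widen the locking: `Destroy()` now holds the mutex across the whole drain of the pool, and `ReturnHandler()` locks before the `restore_pool` branch rather than inside it. A minimal sketch of the drain-under-lock pattern, using a plain std::mutex instead of the project's AutoLock wrapper (types simplified):

    #include <deque>
    #include <mutex>

    struct pool_sketch {
        std::mutex        mtx;
        std::deque<void*> handles;                   // stand-in for the CURL* pool

        void destroy_all()
        {
            std::lock_guard<std::mutex> lock(mtx);   // hold the lock for the whole drain
            while(!handles.empty()){
                void* h = handles.back();
                handles.pop_back();
                release(h);                          // stand-in for curl_easy_cleanup()
            }
        }
        static void release(void*) {}
    };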
@@ -23,6 +23,7 @@

#include <cassert>
#include <curl/curl.h>
#include <list>

//----------------------------------------------
// Typedefs
@@ -39,12 +40,17 @@ class CurlHandlerPool
{
assert(maxHandlers > 0);
}
CurlHandlerPool(const CurlHandlerPool&) = delete;
CurlHandlerPool(CurlHandlerPool&&) = delete;
CurlHandlerPool& operator=(const CurlHandlerPool&) = delete;
CurlHandlerPool& operator=(CurlHandlerPool&&) = delete;

bool Init();
bool Destroy();

CURL* GetHandler(bool only_pool);
void ReturnHandler(CURL* hCurl, bool restore_pool);
void ResetHandler(CURL* hCurl);

private:
int mMaxHandlers;
@@ -21,17 +21,19 @@
#include <cstdio>
#include <cstdlib>
#include <cerrno>
#include <vector>

#include "common.h"
#include "s3fs.h"
#include "s3fs_logger.h"
#include "curl_multi.h"
#include "curl.h"
#include "autolock.h"
#include "psemaphore.h"

//-------------------------------------------------------------------
// Class S3fsMultiCurl
//-------------------------------------------------------------------
S3fsMultiCurl::S3fsMultiCurl(int maxParallelism) : maxParallelism(maxParallelism), SuccessCallback(NULL), RetryCallback(NULL)
S3fsMultiCurl::S3fsMultiCurl(int maxParallelism, bool not_abort) : maxParallelism(maxParallelism), not_abort(not_abort), SuccessCallback(nullptr), NotFoundCallback(nullptr), RetryCallback(nullptr), pSuccessCallbackParam(nullptr), pNotFoundCallbackParam(nullptr)
{
int result;
pthread_mutexattr_t attr;
@@ -58,19 +60,17 @@ bool S3fsMultiCurl::ClearEx(bool is_all)
{
s3fscurllist_t::iterator iter;
for(iter = clist_req.begin(); iter != clist_req.end(); ++iter){
S3fsCurl* s3fscurl = *iter;
S3fsCurl* s3fscurl = iter->get();
if(s3fscurl){
s3fscurl->DestroyCurlHandle();
delete s3fscurl; // with destroy curl handle.
}
}
clist_req.clear();

if(is_all){
for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){
S3fsCurl* s3fscurl = *iter;
S3fsCurl* s3fscurl = iter->get();
s3fscurl->DestroyCurlHandle();
delete s3fscurl;
}
clist_all.clear();
}
@@ -86,20 +86,41 @@ S3fsMultiSuccessCallback S3fsMultiCurl::SetSuccessCallback(S3fsMultiSuccessCallb
SuccessCallback = function;
return old;
}

S3fsMultiNotFoundCallback S3fsMultiCurl::SetNotFoundCallback(S3fsMultiNotFoundCallback function)
{
S3fsMultiNotFoundCallback old = NotFoundCallback;
NotFoundCallback = function;
return old;
}

S3fsMultiRetryCallback S3fsMultiCurl::SetRetryCallback(S3fsMultiRetryCallback function)
{
S3fsMultiRetryCallback old = RetryCallback;
RetryCallback = function;
return old;
}

bool S3fsMultiCurl::SetS3fsCurlObject(S3fsCurl* s3fscurl)

void* S3fsMultiCurl::SetSuccessCallbackParam(void* param)
{
void* old = pSuccessCallbackParam;
pSuccessCallbackParam = param;
return old;
}

void* S3fsMultiCurl::SetNotFoundCallbackParam(void* param)
{
void* old = pNotFoundCallbackParam;
pNotFoundCallbackParam = param;
return old;
}

bool S3fsMultiCurl::SetS3fsCurlObject(std::unique_ptr<S3fsCurl> s3fscurl)
{
if(!s3fscurl){
return false;
}
clist_all.push_back(s3fscurl);
clist_all.push_back(std::move(s3fscurl));

return true;
}
@@ -114,7 +135,7 @@ int S3fsMultiCurl::MultiPerform()

for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ++iter) {
pthread_t thread;
S3fsCurl* s3fscurl = *iter;
S3fsCurl* s3fscurl = iter->get();
if(!s3fscurl){
continue;
}
@@ -131,9 +152,9 @@ int S3fsMultiCurl::MultiPerform()
success = false;
S3FS_PRN_ERR("failed pthread_join - rc(%d) %s", rc, strerror(rc));
} else {
int int_retval = (int)(intptr_t)(retval);
long int_retval = reinterpret_cast<long>(retval);
if (int_retval && !(int_retval == -ENOENT && isMultiHead)) {
S3FS_PRN_WARN("thread terminated with non-zero return code: %d", int_retval);
S3FS_PRN_WARN("thread terminated with non-zero return code: %ld", int_retval);
}
}
}
@@ -145,7 +166,7 @@ int S3fsMultiCurl::MultiPerform()

isMultiHead |= s3fscurl->GetOp() == "HEAD";

rc = pthread_create(&thread, NULL, S3fsMultiCurl::RequestPerformWrapper, static_cast<void*>(s3fscurl));
rc = pthread_create(&thread, nullptr, S3fsMultiCurl::RequestPerformWrapper, static_cast<void*>(s3fscurl));
if (rc != 0) {
success = false;
S3FS_PRN_ERR("failed pthread_create - rc(%d)", rc);
@@ -167,9 +188,9 @@ int S3fsMultiCurl::MultiPerform()
success = false;
S3FS_PRN_ERR("failed pthread_join - rc(%d)", rc);
} else {
int int_retval = (int)(intptr_t)(retval);
long int_retval = reinterpret_cast<long>(retval);
if (int_retval && !(int_retval == -ENOENT && isMultiHead)) {
S3FS_PRN_WARN("thread terminated with non-zero return code: %d", int_retval);
S3FS_PRN_WARN("thread terminated with non-zero return code: %ld", int_retval);
}
}
}
@@ -183,7 +204,7 @@ int S3fsMultiCurl::MultiRead()
int result = 0;

for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ){
S3fsCurl* s3fscurl = *iter;
std::unique_ptr<S3fsCurl> s3fscurl(std::move(*iter));

bool isRetry = false;
bool isPostpone = false;
@@ -197,8 +218,10 @@ int S3fsMultiCurl::MultiRead()
isPostpone = true;
}else if(400 > responseCode){
// add into stat cache
if(SuccessCallback && !SuccessCallback(s3fscurl)){
S3FS_PRN_WARN("error from callback function(%s).", s3fscurl->url.c_str());
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownPointerToBool
if(SuccessCallback && !SuccessCallback(s3fscurl.get(), pSuccessCallbackParam)){
S3FS_PRN_WARN("error from success callback function(%s).", s3fscurl->url.c_str());
}
}else if(400 == responseCode){
// as possibly in multipart
@@ -209,6 +232,12 @@ int S3fsMultiCurl::MultiRead()
// HEAD requests on readdir_multi_head can return 404
if(s3fscurl->GetOp() != "HEAD"){
S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
}
// Call callback function
// cppcheck-suppress unmatchedSuppression
// cppcheck-suppress knownPointerToBool
if(NotFoundCallback && !NotFoundCallback(s3fscurl.get(), pNotFoundCallbackParam)){
S3FS_PRN_WARN("error from not found callback function(%s).", s3fscurl->url.c_str());
}
}else if(500 == responseCode){
// case of all other result, do retry.(11/13/2013)
@@ -244,35 +273,34 @@ int S3fsMultiCurl::MultiRead()

if(isPostpone){
clist_req.erase(iter);
clist_req.push_back(s3fscurl); // Re-evaluate at the end
clist_req.push_back(std::move(s3fscurl)); // Re-evaluate at the end
iter = clist_req.begin();
}else{
if(!isRetry || 0 != result){
if(!isRetry || (!not_abort && 0 != result)){
// If an EIO error has already occurred, it will be terminated
// immediately even if retry processing is required.
s3fscurl->DestroyCurlHandle();
delete s3fscurl;
}else{
S3fsCurl* retrycurl = NULL;

// Reset offset
if(isNeedResetOffset){
S3fsCurl::ResetOffset(s3fscurl);
S3fsCurl::ResetOffset(s3fscurl.get());
}

// For retry
std::unique_ptr<S3fsCurl> retrycurl;
const S3fsCurl* retrycurl_ptr = retrycurl.get(); // save this due to std::move below
if(RetryCallback){
retrycurl = RetryCallback(s3fscurl);
if(NULL != retrycurl){
clist_all.push_back(retrycurl);
retrycurl = RetryCallback(s3fscurl.get());
if(nullptr != retrycurl){
clist_all.push_back(std::move(retrycurl));
}else{
// set EIO and wait for other parts.
result = -EIO;
}
}
if(s3fscurl != retrycurl){
// cppcheck-suppress mismatchingContainers
if(s3fscurl.get() != retrycurl_ptr){
s3fscurl->DestroyCurlHandle();
delete s3fscurl;
}
}
iter = clist_req.erase(iter);
@@ -280,12 +308,11 @@ int S3fsMultiCurl::MultiRead()
}
clist_req.clear();

if(0 != result){
if(!not_abort && 0 != result){
// If an EIO error has already occurred, clear all retry objects.
for(s3fscurllist_t::iterator iter = clist_all.begin(); iter != clist_all.end(); ++iter){
S3fsCurl* s3fscurl = *iter;
S3fsCurl* s3fscurl = iter->get();
s3fscurl->DestroyCurlHandle();
delete s3fscurl;
}
clist_all.clear();
}
@@ -306,8 +333,7 @@ int S3fsMultiCurl::Request()
int result;
s3fscurllist_t::iterator iter;
for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){
S3fsCurl* s3fscurl = *iter;
clist_req.push_back(s3fscurl);
clist_req.push_back(std::move(*iter));
}
clist_all.clear();

@@ -335,19 +361,19 @@ int S3fsMultiCurl::Request()
void* S3fsMultiCurl::RequestPerformWrapper(void* arg)
{
S3fsCurl* s3fscurl= static_cast<S3fsCurl*>(arg);
void* result = NULL;
void* result = nullptr;
if(!s3fscurl){
return (void*)(intptr_t)(-EIO);
return reinterpret_cast<void*>(static_cast<intptr_t>(-EIO));
}
if(s3fscurl->fpLazySetup){
if(!s3fscurl->fpLazySetup(s3fscurl)){
S3FS_PRN_ERR("Failed to lazy setup, then respond EIO.");
result = (void*)(intptr_t)(-EIO);
result = reinterpret_cast<void*>(static_cast<intptr_t>(-EIO));
}
}

if(!result){
result = (void*)(intptr_t)(s3fscurl->RequestPerform());
result = reinterpret_cast<void*>(static_cast<intptr_t>(s3fscurl->RequestPerform()));
s3fscurl->DestroyCurlHandle(true, false);
}
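The S3fsMultiCurl hunks above move the request lists to `std::unique_ptr<S3fsCurl>`, so objects are transferred between `clist_all` and `clist_req` with `std::move()` and destroyed by the container instead of being `delete`d by hand. A reduced sketch of the move pattern, with a stand-in type instead of S3fsCurl:

    #include <memory>
    #include <vector>

    struct request { /* stand-in for S3fsCurl */ };
    typedef std::vector<std::unique_ptr<request>> reqlist_t;

    void drain(reqlist_t& all, reqlist_t& pending)
    {
        for(reqlist_t::iterator iter = all.begin(); iter != all.end(); ++iter){
            pending.push_back(std::move(*iter));   // transfer ownership, as Request() does above
        }
        all.clear();                               // discards the moved-from (null) entries
        pending.clear();                           // unique_ptr destructors free the objects
    }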
@@ -21,14 +21,18 @@
#ifndef S3FS_CURL_MULTI_H_
#define S3FS_CURL_MULTI_H_

#include <memory>
#include <vector>

//----------------------------------------------
// Typedef
//----------------------------------------------
class S3fsCurl;

typedef std::vector<S3fsCurl*> s3fscurllist_t;
typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl); // callback for succeed multi request
typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying
typedef std::vector<std::unique_ptr<S3fsCurl>> s3fscurllist_t;
typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl, void* param); // callback for succeed multi request
typedef bool (*S3fsMultiNotFoundCallback)(S3fsCurl* s3fscurl, void* param); // callback for succeed multi request
typedef std::unique_ptr<S3fsCurl> (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying

//----------------------------------------------
// class S3fsMultiCurl
@@ -40,9 +44,13 @@ class S3fsMultiCurl

s3fscurllist_t clist_all; // all of curl requests
s3fscurllist_t clist_req; // curl requests are sent
bool not_abort; // complete all requests without aborting on errors

S3fsMultiSuccessCallback SuccessCallback;
S3fsMultiRetryCallback RetryCallback;
S3fsMultiSuccessCallback SuccessCallback;
S3fsMultiNotFoundCallback NotFoundCallback;
S3fsMultiRetryCallback RetryCallback;
void* pSuccessCallbackParam;
void* pNotFoundCallbackParam;

pthread_mutex_t completed_tids_lock;
std::vector<pthread_t> completed_tids;
@@ -55,15 +63,18 @@ class S3fsMultiCurl
static void* RequestPerformWrapper(void* arg);

public:
explicit S3fsMultiCurl(int maxParallelism);
explicit S3fsMultiCurl(int maxParallelism, bool not_abort = false);
~S3fsMultiCurl();

int GetMaxParallelism() { return maxParallelism; }
int GetMaxParallelism() const { return maxParallelism; }

S3fsMultiSuccessCallback SetSuccessCallback(S3fsMultiSuccessCallback function);
S3fsMultiNotFoundCallback SetNotFoundCallback(S3fsMultiNotFoundCallback function);
S3fsMultiRetryCallback SetRetryCallback(S3fsMultiRetryCallback function);
void* SetSuccessCallbackParam(void* param);
void* SetNotFoundCallbackParam(void* param);
bool Clear() { return ClearEx(true); }
bool SetS3fsCurlObject(S3fsCurl* s3fscurl);
bool SetS3fsCurlObject(std::unique_ptr<S3fsCurl> s3fscurl);
int Request();
};
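The curl_multi.h hunks above add a `void* param` to the success callback, introduce a separate not-found callback, and add per-callback parameter setters. A sketch of how a caller might wire this up; the callback name and the counter are hypothetical, only the S3fsMultiCurl methods and signatures come from the diff:

    class S3fsCurl;   // forward declaration, matching the header above

    // Hypothetical callback matching the new S3fsMultiSuccessCallback signature.
    static bool my_success_cb(S3fsCurl* /*s3fscurl*/, void* param)
    {
        int* counter = static_cast<int*>(param);   // param is whatever was registered below
        ++(*counter);
        return true;
    }

    // usage sketch (assumes curl_multi.h is included):
    //   int ok_count = 0;
    //   S3fsMultiCurl curlmulti(20, /*not_abort=*/true);
    //   curlmulti.SetSuccessCallback(my_success_cb);
    //   curlmulti.SetSuccessCallbackParam(&ok_count);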
@@ -23,7 +23,7 @@
#include <curl/curl.h>

#include "common.h"
#include "s3fs.h"
#include "s3fs_logger.h"
#include "curl_util.h"
#include "string_util.h"
#include "s3fs_auth.h"
@@ -37,23 +37,6 @@
// This function is like curl_slist_append function, but this adds data by a-sorting.
// Because AWS signature needs sorted header.
//
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data)
{
if(!data){
return list;
}
std::string strkey = data;
std::string strval;

std::string::size_type pos = strkey.find(':', 0);
if(std::string::npos != pos){
strval = strkey.substr(pos + 1);
strkey.erase(pos);
}

return curl_slist_sort_insert(list, strkey.c_str(), strval.c_str());
}

struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value)
{
if(!key){
@@ -61,11 +44,11 @@ struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* k
}

// key & value are trimmed and lower (only key)
std::string strkey = trim(std::string(key));
std::string strval = value ? trim(std::string(value)) : "";
std::string strkey = trim(key);
std::string strval = value ? trim(value) : "";
std::string strnew = key + std::string(": ") + strval;
char* data;
if(NULL == (data = strdup(strnew.c_str()))){
if(nullptr == (data = strdup(strnew.c_str()))){
return list;
}

@@ -88,7 +71,8 @@ struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* k
}

struct curl_slist* new_item;
if(NULL == (new_item = static_cast<struct curl_slist*>(malloc(sizeof(*new_item))))){
// Must use malloc since curl_slist_free_all calls free.
if(nullptr == (new_item = static_cast<struct curl_slist*>(malloc(sizeof(*new_item))))){
free(data);
return list;
}
@@ -107,9 +91,9 @@ struct curl_slist* curl_slist_remove(struct curl_slist* list, const char* key)
return list;
}

std::string strkey = trim(std::string(key));
std::string strkey = trim(key);
struct curl_slist **p = &list;
for(;*p; p = &(*p)->next){
while(*p){
std::string strcur = (*p)->data;
size_t pos;
if(std::string::npos != (pos = strcur.find(':', 0))){
@@ -122,6 +106,8 @@ struct curl_slist* curl_slist_remove(struct curl_slist* list, const char* key)
struct curl_slist *tmp = *p;
*p = (*p)->next;
free(tmp);
}else{
p = &(*p)->next;
}
}

@@ -174,37 +160,6 @@ std::string get_header_value(const struct curl_slist* list, const std::string &k
return "";
}

std::string get_canonical_headers(const struct curl_slist* list)
{
std::string canonical_headers;

if(!list){
canonical_headers = "\n";
return canonical_headers;
}

for( ; list; list = list->next){
std::string strhead = list->data;
size_t pos;
if(std::string::npos != (pos = strhead.find(':', 0))){
std::string strkey = trim(lower(strhead.substr(0, pos)));
std::string strval = trim(strhead.substr(pos + 1));
if (strval.empty()) {
// skip empty-value headers (as they are discarded by libcurl)
continue;
}
strhead = strkey;
strhead += ":";
strhead += strval;
}else{
strhead = trim(lower(strhead));
}
canonical_headers += strhead;
canonical_headers += "\n";
}
return canonical_headers;
}

std::string get_canonical_headers(const struct curl_slist* list, bool only_amz)
{
std::string canonical_headers;
@@ -245,7 +200,7 @@ bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::strin
if(!realpath){
return false;
}
resourcepath = urlEncode(service_path + S3fsCred::GetBucket() + realpath);
resourcepath = urlEncodePath(service_path + S3fsCred::GetBucket() + realpath);
url = s3host + resourcepath;
return true;
}
@@ -257,8 +212,8 @@ std::string prepare_url(const char* url)
std::string uri;
std::string hostname;
std::string path;
std::string url_str = std::string(url);
std::string token = std::string("/") + S3fsCred::GetBucket();
std::string url_str = url;
std::string token = "/" + S3fsCred::GetBucket();
size_t bucket_pos;
size_t bucket_length = token.size();
size_t uri_length = 0;
@@ -290,40 +245,18 @@ std::string prepare_url(const char* url)
return url_str;
}

// [TODO]
// This function uses temporary file, but should not use it.
// For not using it, we implement function in each auth file(openssl, nss. gnutls).
//
bool make_md5_from_binary(const char* pstr, size_t length, std::string& md5)
{
if(!pstr || '\0' == pstr[0]){
S3FS_PRN_ERR("Parameter is wrong.");
return false;
}
FILE* fp;
if(NULL == (fp = tmpfile())){
S3FS_PRN_ERR("Could not make tmpfile.");
md5_t binary;
if(!s3fs_md5(reinterpret_cast<const unsigned char*>(pstr), length, &binary)){
return false;
}
if(length != fwrite(pstr, sizeof(char), length, fp)){
S3FS_PRN_ERR("Failed to write tmpfile.");
fclose(fp);
return false;
}
int fd;
if(0 != fflush(fp) || 0 != fseek(fp, 0L, SEEK_SET) || -1 == (fd = fileno(fp))){
S3FS_PRN_ERR("Failed to make MD5.");
fclose(fp);
return false;
}
// base64 md5
md5 = s3fs_get_content_md5(fd);
if(md5.empty()){
S3FS_PRN_ERR("Failed to make MD5.");
fclose(fp);
return false;
}
fclose(fp);

md5 = s3fs_base64(binary.data(), binary.size());
return true;
}

@@ -331,8 +264,8 @@ std::string url_to_host(const std::string &url)
{
S3FS_PRN_INFO3("url is %s", url.c_str());

static const char HTTP[] = "http://";
static const char HTTPS[] = "https://";
static constexpr char HTTP[] = "http://";
static constexpr char HTTPS[] = "https://";
std::string hostname;

if (is_prefix(url.c_str(), HTTP)) {
@@ -386,17 +319,9 @@ const char* getCurlDebugHead(curl_infotype type)
//
// compare ETag ignoring quotes and case
//
bool etag_equals(std::string s1, std::string s2)
bool etag_equals(const std::string& s1, const std::string& s2)
{
if(s1.length() > 1 && s1[0] == '\"' && *s1.rbegin() == '\"'){
s1.erase(s1.size() - 1);
s1.erase(0, 1);
}
if(s2.length() > 1 && s2[0] == '\"' && *s2.rbegin() == '\"'){
s2.erase(s2.size() - 1);
s2.erase(0, 1);
}
return 0 == strcasecmp(s1.c_str(), s2.c_str());
return 0 == strcasecmp(peeloff(s1).c_str(), peeloff(s2).c_str());
}

/*
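In the last hunk above, `etag_equals()` now takes const references and strips surrounding quotes through a `peeloff()` helper instead of mutating its copies in place. A minimal sketch of what a quote-stripping helper plus the comparison could look like; the real `peeloff()` lives elsewhere in the tree and its exact behavior is assumed here:

    #include <string>
    #include <strings.h>   // strcasecmp

    // assumed behavior of peeloff(): drop one pair of surrounding double quotes, if present
    static std::string peeloff_sketch(const std::string& str)
    {
        if(str.size() > 1 && str[0] == '"' && *str.rbegin() == '"'){
            return str.substr(1, str.size() - 2);
        }
        return str;
    }

    static bool etag_equals_sketch(const std::string& s1, const std::string& s2)
    {
        // case-insensitive compare of the unquoted ETags, mirroring the diff above
        return 0 == strcasecmp(peeloff_sketch(s1).c_str(), peeloff_sketch(s2).c_str());
    }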
@@ -23,13 +23,11 @@

#include <curl/curl.h>

class sse_type_t;
enum class sse_type_t;

//----------------------------------------------
// Functions
//----------------------------------------------
std::string GetContentMD5(int fd);
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data);
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value);
struct curl_slist* curl_slist_remove(struct curl_slist* list, const char* key);
std::string get_sorted_header_keys(const struct curl_slist* list);
@@ -44,7 +42,7 @@ std::string url_to_host(const std::string &url);
std::string get_bucket_host();
const char* getCurlDebugHead(curl_infotype type);

bool etag_equals(std::string s1, std::string s2);
bool etag_equals(const std::string& s1, const std::string& s2);

#endif // S3FS_CURL_UTIL_H_
src/fdcache.cpp (221 changed lines)
@@ -23,13 +23,12 @@
#include <cerrno>
#include <climits>
#include <unistd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/statvfs.h>

#include "common.h"
#include "s3fs.h"
#include "fdcache.h"
#include "fdcache_pseudofd.h"
#include "fdcache_stat.h"
#include "s3fs_util.h"
#include "s3fs_logger.h"
#include "s3fs_cred.h"
@@ -39,7 +38,7 @@
//
// The following symbols are used by FdManager::RawCheckAllCache().
//
#define CACHEDBG_FMT_DIR_PROB "Directory: %s"
// These must be #defines due to string literal concatenation.
#define CACHEDBG_FMT_HEAD "---------------------------------------------------------------------------\n" \
"Check cache file and its stats file consistency at %s\n" \
"---------------------------------------------------------------------------"
@@ -71,7 +70,7 @@
// This process may not be complete, but it is easy way can
// be realized.
//
#define NOCACHE_PATH_PREFIX_FORM " __S3FS_UNEXISTED_PATH_%lx__ / " // important space words for simply
static constexpr char NOCACHE_PATH_PREFIX_FORM[] = " __S3FS_UNEXISTED_PATH_%lx__ / "; // important space words for simply

//------------------------------------------------
// FdManager class variable
@@ -120,7 +119,7 @@ bool FdManager::DeleteCacheDirectory()
}

std::string cache_path;
if(!FdManager::MakeCachePath(NULL, cache_path, false)){
if(!FdManager::MakeCachePath(nullptr, cache_path, false)){
return false;
}
if(!delete_files_in_dir(cache_path.c_str(), true)){
@@ -156,18 +155,13 @@ int FdManager::DeleteCacheFile(const char* path)
}else{
S3FS_PRN_ERR("failed to delete file(%s): errno=%d", path, errno);
}
result = -errno;
return -errno;
}
if(!CacheFileStat::DeleteCacheFileStat(path)){
if(ENOENT == errno){
S3FS_PRN_DBG("failed to delete stat file(%s): errno=%d", path, errno);
if(0 != (result = CacheFileStat::DeleteCacheFileStat(path))){
if(-ENOENT == result){
S3FS_PRN_DBG("failed to delete stat file(%s): errno=%d", path, result);
}else{
S3FS_PRN_ERR("failed to delete stat file(%s): errno=%d", path, errno);
}
if(0 != errno){
result = -errno;
}else{
result = -EIO;
S3FS_PRN_ERR("failed to delete stat file(%s): errno=%d", path, result);
}
}
return result;
@@ -219,7 +213,8 @@ bool FdManager::MakeRandomTempPath(const char* path, std::string& tmppath)
{
char szBuff[64];

sprintf(szBuff, NOCACHE_PATH_PREFIX_FORM, random()); // worry for performance, but maybe don't worry.
snprintf(szBuff, sizeof(szBuff), NOCACHE_PATH_PREFIX_FORM, random()); // worry for performance, but maybe don't worry.
szBuff[sizeof(szBuff) - 1] = '\0'; // for safety
tmppath = szBuff;
tmppath += path ? path : "";
return true;
@@ -261,7 +256,7 @@ bool FdManager::InitFakeUsedDiskSize(off_t fake_freesize)
{
FdManager::fake_used_disk_space = 0; // At first, clear this value because this value is used in GetFreeDiskSpace.

off_t actual_freesize = FdManager::GetFreeDiskSpace(NULL);
off_t actual_freesize = FdManager::GetFreeDiskSpace(nullptr);

if(fake_freesize < actual_freesize){
FdManager::fake_used_disk_space = actual_freesize - fake_freesize;
@@ -271,9 +266,38 @@ bool FdManager::InitFakeUsedDiskSize(off_t fake_freesize)
return true;
}

off_t FdManager::GetTotalDiskSpaceByRatio(int ratio)
{
return FdManager::GetTotalDiskSpace(nullptr) * ratio / 100;
}

off_t FdManager::GetTotalDiskSpace(const char* path)
{
struct statvfs vfsbuf;
int result = FdManager::GetVfsStat(path, &vfsbuf);
if(result == -1){
return 0;
}

off_t actual_totalsize = vfsbuf.f_blocks * vfsbuf.f_frsize;

return actual_totalsize;
}

off_t FdManager::GetFreeDiskSpace(const char* path)
{
struct statvfs vfsbuf;
int result = FdManager::GetVfsStat(path, &vfsbuf);
if(result == -1){
return 0;
}

off_t actual_freesize = vfsbuf.f_bavail * vfsbuf.f_frsize;

return (FdManager::fake_used_disk_space < actual_freesize ? (actual_freesize - FdManager::fake_used_disk_space) : 0);
}

int FdManager::GetVfsStat(const char* path, struct statvfs* vfsbuf){
std::string ctoppath;
if(!FdManager::cache_dir.empty()){
ctoppath = FdManager::cache_dir + "/";
@@ -289,14 +313,12 @@ off_t FdManager::GetFreeDiskSpace(const char* path)
}else{
ctoppath += ".";
}
if(-1 == statvfs(ctoppath.c_str(), &vfsbuf)){
if(-1 == statvfs(ctoppath.c_str(), vfsbuf)){
S3FS_PRN_ERR("could not get vfs stat by errno(%d)", errno);
return 0;
return -1;
}

off_t actual_freesize = vfsbuf.f_bavail * vfsbuf.f_frsize;

return (FdManager::fake_used_disk_space < actual_freesize ? (actual_freesize - FdManager::fake_used_disk_space) : 0);
return 0;
}

bool FdManager::IsSafeDiskSpace(const char* path, off_t size)
@@ -305,6 +327,18 @@ bool FdManager::IsSafeDiskSpace(const char* path, off_t size)
return size + FdManager::GetEnsureFreeDiskSpace() <= fsize;
}

bool FdManager::IsSafeDiskSpaceWithLog(const char* path, off_t size)
{
off_t fsize = FdManager::GetFreeDiskSpace(path);
off_t needsize = size + FdManager::GetEnsureFreeDiskSpace();
if(needsize <= fsize){
return true;
} else {
S3FS_PRN_EXIT("There is no enough disk space for used as cache(or temporary) directory by s3fs. Requires %.3f MB, already has %.3f MB.", static_cast<double>(needsize) / 1024 / 1024, static_cast<double>(fsize) / 1024 / 1024);
return false;
}
}

bool FdManager::HaveLseekHole()
{
if(FdManager::checked_lseek){
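The hunks above factor the statvfs() call into `GetVfsStat()` so that both `GetFreeDiskSpace()` and the new `GetTotalDiskSpace()` share it: free space comes from `f_bavail` and total space from `f_blocks`, each scaled by `f_frsize`. A standalone sketch of that calculation, independent of the FdManager class:

    #include <sys/statvfs.h>
    #include <sys/types.h>

    // Sketch only: report free and total bytes for the filesystem holding `path`.
    static bool disk_space_sketch(const char* path, off_t& free_bytes, off_t& total_bytes)
    {
        struct statvfs vfsbuf;
        if(-1 == statvfs(path, &vfsbuf)){
            return false;
        }
        free_bytes  = static_cast<off_t>(vfsbuf.f_bavail) * vfsbuf.f_frsize;  // blocks available to unprivileged users
        total_bytes = static_cast<off_t>(vfsbuf.f_blocks) * vfsbuf.f_frsize;  // all blocks in the filesystem
        return true;
    }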
@@ -312,16 +346,13 @@
}

// create temporary file
FILE* ptmpfp;
int fd;
if(NULL == (ptmpfp = MakeTempFile()) || -1 == (fd = fileno(ptmpfp))){
int fd;
std::unique_ptr<FILE, decltype(&s3fs_fclose)> ptmpfp(MakeTempFile(), &s3fs_fclose);
if(nullptr == ptmpfp || -1 == (fd = fileno(ptmpfp.get()))){
S3FS_PRN_ERR("failed to open temporary file by errno(%d)", errno);
if(ptmpfp){
fclose(ptmpfp);
}
FdManager::checked_lseek = true;
FdManager::have_lseek_hole = false;
return FdManager::have_lseek_hole;
return false;
}

// check SEEK_DATA/SEEK_HOLE options
@@ -338,7 +369,6 @@ bool FdManager::HaveLseekHole()
result = false;
}
}
fclose(ptmpfp);

FdManager::checked_lseek = true;
FdManager::have_lseek_hole = result;
@@ -388,11 +418,11 @@ FILE* FdManager::MakeTempFile() {
fd = mkstemp(cfn);
if (-1 == fd) {
S3FS_PRN_ERR("failed to create tmp file. errno(%d)", errno);
return NULL;
return nullptr;
}
if (-1 == unlink(cfn)) {
S3FS_PRN_ERR("failed to delete tmp file. errno(%d)", errno);
return NULL;
return nullptr;
}
return fdopen(fd, "rb+");
}
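In the first hunk above, `HaveLseekHole()` wraps the temporary `FILE*` in a `std::unique_ptr` with a custom deleter so every return path closes it, instead of calling `fclose()` by hand. A sketch of the idiom, using plain `fclose` where the diff uses the project's `s3fs_fclose` wrapper:

    #include <cstdio>
    #include <memory>

    static void close_file_sketch(FILE* fp)
    {
        if(fp){
            fclose(fp);        // stand-in for s3fs_fclose; guards against a null pointer
        }
    }

    static bool probe_tempfile_sketch()
    {
        std::unique_ptr<FILE, decltype(&close_file_sketch)> fp(tmpfile(), &close_file_sketch);
        if(nullptr == fp){
            return false;      // nothing to clean up on this path
        }
        // ... work with fileno(fp.get()) here ...
        return true;           // fp is closed automatically on every return path
    }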
@ -401,9 +431,9 @@ bool FdManager::HasOpenEntityFd(const char* path)
|
||||
{
|
||||
AutoLock auto_lock(&FdManager::fd_manager_lock);
|
||||
|
||||
FdEntity* ent;
|
||||
const FdEntity* ent;
|
||||
int fd = -1;
|
||||
if(NULL == (ent = FdManager::singleton.GetFdEntity(path, fd, false, true))){
|
||||
if(nullptr == (ent = FdManager::singleton.GetFdEntity(path, fd, false, AutoLock::ALREADY_LOCKED))){
|
||||
return false;
|
||||
}
|
||||
return (0 < ent->GetOpenCount());
|
||||
@ -416,7 +446,7 @@ int FdManager::GetOpenFdCount(const char* path)
|
||||
{
|
||||
AutoLock auto_lock(&FdManager::fd_manager_lock);
|
||||
|
||||
return FdManager::singleton.GetPseudoFdCount(path);
|
||||
return FdManager::singleton.GetPseudoFdCount(path);
|
||||
}
|
||||
|
||||
//------------------------------------------------
|
||||
@ -453,9 +483,8 @@ FdManager::~FdManager()
|
||||
{
|
||||
if(this == FdManager::get()){
|
||||
for(fdent_map_t::iterator iter = fent.begin(); fent.end() != iter; ++iter){
|
||||
FdEntity* ent = (*iter).second;
|
||||
S3FS_PRN_WARN("To exit with the cache file opened: path=%s, refcnt=%d", ent->GetPath(), ent->GetOpenCount());
|
||||
delete ent;
|
||||
FdEntity* ent = (*iter).second.get();
|
||||
S3FS_PRN_WARN("To exit with the cache file opened: path=%s, refcnt=%d", ent->GetPath().c_str(), ent->GetOpenCount());
|
||||
}
|
||||
fent.clear();
|
||||
|
||||
@ -480,27 +509,27 @@ FdManager::~FdManager()
|
||||
}
|
||||
}
|
||||
|
||||
FdEntity* FdManager::GetFdEntity(const char* path, int& existfd, bool newfd, bool lock_already_held)
|
||||
FdEntity* FdManager::GetFdEntity(const char* path, int& existfd, bool newfd, AutoLock::Type locktype)
|
||||
{
|
||||
S3FS_PRN_INFO3("[path=%s][pseudo_fd=%d]", SAFESTRPTR(path), existfd);
|
||||
|
||||
if(!path || '\0' == path[0]){
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
AutoLock auto_lock(&FdManager::fd_manager_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE);
|
||||
AutoLock auto_lock(&FdManager::fd_manager_lock, locktype);
|
||||
|
||||
fdent_map_t::iterator iter = fent.find(std::string(path));
|
||||
fdent_map_t::iterator iter = fent.find(path);
|
||||
if(fent.end() != iter && iter->second){
|
||||
if(-1 == existfd){
|
||||
if(newfd){
|
||||
existfd = iter->second->OpenPseudoFd(O_RDWR); // [NOTE] O_RDWR flags
|
||||
}
|
||||
return iter->second;
|
||||
return iter->second.get();
|
||||
}else if(iter->second->FindPseudoFd(existfd)){
|
||||
if(newfd){
|
||||
existfd = iter->second->Dup(existfd);
|
||||
}
|
||||
return iter->second;
|
||||
return iter->second.get();
|
||||
}
|
||||
}
|
||||
|
||||
@ -508,14 +537,14 @@ FdEntity* FdManager::GetFdEntity(const char* path, int& existfd, bool newfd, boo
|
||||
for(iter = fent.begin(); iter != fent.end(); ++iter){
|
||||
if(iter->second && iter->second->FindPseudoFd(existfd)){
|
||||
// found opened fd in map
|
||||
if(0 == strcmp(iter->second->GetPath(), path)){
|
||||
if(iter->second->GetPath() == path){
|
||||
if(newfd){
|
||||
existfd = iter->second->Dup(existfd);
|
||||
}
|
||||
return iter->second;
|
||||
return iter->second.get();
|
||||
}
|
||||
// found fd, but it is used another file(file descriptor is recycled)
|
||||
// so returns NULL.
|
||||
// so returns nullptr.
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -525,26 +554,26 @@ FdEntity* FdManager::GetFdEntity(const char* path, int& existfd, bool newfd, boo
|
||||
// when the file is opened.
|
||||
if(!FdManager::IsCacheDir()){
|
||||
for(iter = fent.begin(); iter != fent.end(); ++iter){
|
||||
if(iter->second && iter->second->IsOpen() && 0 == strcmp(iter->second->GetPath(), path)){
|
||||
return iter->second;
|
||||
if(iter->second && iter->second->IsOpen() && iter->second->GetPath() == path){
|
||||
return iter->second.get();
|
||||
}
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
FdEntity* FdManager::Open(int& fd, const char* path, headers_t* pmeta, off_t size, time_t time, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type)
|
||||
FdEntity* FdManager::Open(int& fd, const char* path, const headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type)
|
||||
{
|
||||
S3FS_PRN_DBG("[path=%s][size=%lld][time=%lld][flags=0x%x][force_tmpfile=%s][create=%s][ignore_modify=%s]", SAFESTRPTR(path), static_cast<long long>(size), static_cast<long long>(time), flags, (force_tmpfile ? "yes" : "no"), (is_create ? "yes" : "no"), (ignore_modify ? "yes" : "no"));
|
||||
S3FS_PRN_DBG("[path=%s][size=%lld][ts_mctime=%s][flags=0x%x][force_tmpfile=%s][create=%s][ignore_modify=%s]", SAFESTRPTR(path), static_cast<long long>(size), str(ts_mctime).c_str(), flags, (force_tmpfile ? "yes" : "no"), (is_create ? "yes" : "no"), (ignore_modify ? "yes" : "no"));
|
||||
|
||||
if(!path || '\0' == path[0]){
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
AutoLock auto_lock(&FdManager::fd_manager_lock);
|
||||
|
||||
// search in mapping by key(path)
|
||||
fdent_map_t::iterator iter = fent.find(std::string(path));
|
||||
fdent_map_t::iterator iter = fent.find(path);
|
||||
if(fent.end() == iter && !force_tmpfile && !FdManager::IsCacheDir()){
|
||||
// If the cache directory is not specified, s3fs opens a temporary file
|
||||
// when the file is opened.
|
||||
@ -552,16 +581,15 @@ FdEntity* FdManager::Open(int& fd, const char* path, headers_t* pmeta, off_t siz
|
||||
// search an entity among all entities that opened the temporary file.
|
||||
//
|
||||
for(iter = fent.begin(); iter != fent.end(); ++iter){
|
||||
if(iter->second && iter->second->IsOpen() && 0 == strcmp(iter->second->GetPath(), path)){
|
||||
if(iter->second && iter->second->IsOpen() && iter->second->GetPath() == path){
|
||||
break; // found opened fd in mapping
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
FdEntity* ent;
|
||||
if(fent.end() != iter){
|
||||
// found
|
||||
ent = iter->second;
|
||||
FdEntity* ent = iter->second.get();
|
||||
|
||||
// [NOTE]
|
||||
// If the file is being modified and ignore_modify flag is false,
|
||||
@ -578,30 +606,31 @@ FdEntity* FdManager::Open(int& fd, const char* path, headers_t* pmeta, off_t siz
|
||||
}
|
||||
|
||||
// (re)open
|
||||
if(-1 == (fd = ent->Open(pmeta, size, time, flags, type))){
|
||||
if(0 > (fd = ent->Open(pmeta, size, ts_mctime, flags, type))){
|
||||
S3FS_PRN_ERR("failed to (re)open and create new pseudo fd for path(%s).", path);
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return ent;
|
||||
}else if(is_create){
|
||||
// not found
|
||||
std::string cache_path;
|
||||
if(!force_tmpfile && !FdManager::MakeCachePath(path, cache_path, true)){
|
||||
S3FS_PRN_ERR("failed to make cache path for object(%s).", path);
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
// make new obj
|
||||
ent = new FdEntity(path, cache_path.c_str());
|
||||
std::unique_ptr<FdEntity> ent(new FdEntity(path, cache_path.c_str()));
|
||||
|
||||
// open
|
||||
if(-1 == (fd = ent->Open(pmeta, size, time, flags, type))){
|
||||
delete ent;
|
||||
return NULL;
|
||||
if(0 > (fd = ent->Open(pmeta, size, ts_mctime, flags, type))){
|
||||
S3FS_PRN_ERR("failed to open and create new pseudo fd for path(%s) errno:%d.", path, fd);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if(!cache_path.empty()){
|
||||
// using cache
|
||||
fent[std::string(path)] = ent;
|
||||
return (fent[path] = std::move(ent)).get();
|
||||
}else{
|
||||
// not using cache, so the key of fdentity is set not really existing path.
|
||||
// (but not strictly unexisting path.)
|
||||
@ -612,12 +641,11 @@ FdEntity* FdManager::Open(int& fd, const char* path, headers_t* pmeta, off_t siz
|
||||
//
|
||||
std::string tmppath;
|
||||
FdManager::MakeRandomTempPath(path, tmppath);
|
||||
fent[tmppath] = ent;
|
||||
return (fent[tmppath] = std::move(ent)).get();
|
||||
}
|
||||
}else{
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
return ent;
|
||||
}
|
||||
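The hunk above switches the entity map to std::unique_ptr ownership: a newly created FdEntity is moved into the map and the caller receives a non-owning raw pointer from .get(). A minimal, self-contained sketch of that ownership pattern follows; the Entity type and OpenEntity helper are illustrative stand-ins, not s3fs code.

// Minimal sketch (illustrative, not s3fs code): a map that owns its entries
// through std::unique_ptr and hands out non-owning raw pointers.
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Entity {
    std::string path;
    explicit Entity(std::string p) : path(std::move(p)) {}
};

typedef std::map<std::string, std::unique_ptr<Entity>> entity_map_t;

// Insert a freshly created entity; the map keeps ownership, so the caller
// must never delete the returned pointer.
Entity* OpenEntity(entity_map_t& fent, const std::string& key)
{
    std::unique_ptr<Entity> ent(new Entity(key));
    return (fent[key] = std::move(ent)).get();
}

int main()
{
    entity_map_t fent;
    Entity* ent = OpenEntity(fent, "/bucket/object");
    std::cout << ent->path << " entries=" << fent.size() << std::endl;
    return 0;    // all entities are destroyed with the map
}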
|
||||
// [NOTE]
|
||||
@ -634,11 +662,11 @@ FdEntity* FdManager::GetExistFdEntity(const char* path, int existfd)
|
||||
for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){
|
||||
if(iter->second && iter->second->FindPseudoFd(existfd)){
|
||||
// found existfd in entity
|
||||
return iter->second;
|
||||
return iter->second.get();
|
||||
}
|
||||
}
|
||||
// not found entity
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
FdEntity* FdManager::OpenExistFdEntity(const char* path, int& fd, int flags)
|
||||
@ -646,10 +674,10 @@ FdEntity* FdManager::OpenExistFdEntity(const char* path, int& fd, int flags)
|
||||
S3FS_PRN_DBG("[path=%s][flags=0x%x]", SAFESTRPTR(path), flags);
|
||||
|
||||
// search entity by path, and create pseudo fd
|
||||
FdEntity* ent = Open(fd, path, NULL, -1, -1, flags, false, false, false, AutoLock::NONE);
|
||||
FdEntity* ent = Open(fd, path, nullptr, -1, S3FS_OMIT_TS, flags, false, false, false, AutoLock::NONE);
|
||||
if(!ent){
|
||||
// Not found entity
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
return ent;
|
||||
}
|
||||
@ -668,7 +696,7 @@ int FdManager::GetPseudoFdCount(const char* path)
|
||||
|
||||
// search from all entity.
|
||||
for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){
|
||||
if(iter->second && 0 == strcmp(iter->second->GetPath(), path)){
|
||||
if(iter->second && iter->second->GetPath() == path){
|
||||
// found the entity for the path
|
||||
return iter->second->GetOpenCount();
|
||||
}
|
||||
@ -689,7 +717,7 @@ void FdManager::Rename(const std::string &from, const std::string &to)
|
||||
// search an entity among all entities that opened the temporary file.
|
||||
//
|
||||
for(iter = fent.begin(); iter != fent.end(); ++iter){
|
||||
if(iter->second && iter->second->IsOpen() && 0 == strcmp(iter->second->GetPath(), from.c_str())){
|
||||
if(iter->second && iter->second->IsOpen() && iter->second->GetPath() == from){
|
||||
break; // found opened fd in mapping
|
||||
}
|
||||
}
|
||||
@ -699,7 +727,7 @@ void FdManager::Rename(const std::string &from, const std::string &to)
|
||||
// found
|
||||
S3FS_PRN_DBG("[from=%s][to=%s]", from.c_str(), to.c_str());
|
||||
|
||||
FdEntity* ent = iter->second;
|
||||
std::unique_ptr<FdEntity> ent(std::move(iter->second));
|
||||
|
||||
// retrieve old fd entity from map
|
||||
fent.erase(iter);
|
||||
@ -712,13 +740,13 @@ void FdManager::Rename(const std::string &from, const std::string &to)
|
||||
}
|
||||
|
||||
// set new fd entity to map
|
||||
fent[fentmapkey] = ent;
|
||||
fent[fentmapkey] = std::move(ent);
|
||||
}
|
||||
}
|
||||
|
||||
bool FdManager::Close(FdEntity* ent, int fd)
|
||||
{
|
||||
S3FS_PRN_DBG("[ent->file=%s][pseudo_fd=%d]", ent ? ent->GetPath() : "", fd);
|
||||
S3FS_PRN_DBG("[ent->file=%s][pseudo_fd=%d]", ent ? ent->GetPath().c_str() : "", fd);
|
||||
|
||||
if(!ent || -1 == fd){
|
||||
return true; // returns success
|
||||
@ -726,21 +754,20 @@ bool FdManager::Close(FdEntity* ent, int fd)
|
||||
AutoLock auto_lock(&FdManager::fd_manager_lock);
|
||||
|
||||
for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){
|
||||
if(iter->second == ent){
|
||||
if(iter->second.get() == ent){
|
||||
ent->Close(fd);
|
||||
if(!ent->IsOpen()){
|
||||
// remove found entity from map.
|
||||
fent.erase(iter++);
|
||||
iter = fent.erase(iter);
|
||||
|
||||
// check another key name for entity value to be on the safe side
|
||||
for(; iter != fent.end(); ){
|
||||
if(iter->second == ent){
|
||||
fent.erase(iter++);
|
||||
if(iter->second.get() == ent){
|
||||
iter = fent.erase(iter);
|
||||
}else{
|
||||
++iter;
|
||||
}
|
||||
}
|
||||
delete ent;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
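Close() now walks the map with the C++11 idiom in which erase() returns the iterator to the next element, replacing the old fent.erase(iter++) form and the manual delete. A small standalone sketch of that idiom, with illustrative types only:

// Minimal sketch (illustrative): erasing matching entries from a std::map
// while iterating, using erase()'s return value as the next iterator.
#include <cassert>
#include <map>
#include <memory>
#include <string>

int main()
{
    std::map<std::string, std::unique_ptr<int>> m;
    m["a"].reset(new int(1));
    m["b"].reset(new int(2));
    m["c"].reset(new int(1));

    // Remove every entry whose value is 1; the unique_ptr frees the int.
    for(std::map<std::string, std::unique_ptr<int>>::iterator iter = m.begin(); iter != m.end(); ){
        if(1 == *(iter->second)){
            iter = m.erase(iter);    // erase() hands back the next valid iterator
        }else{
            ++iter;
        }
    }
    assert(1 == m.size());
    return 0;
}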
@ -753,12 +780,11 @@ bool FdManager::ChangeEntityToTempPath(FdEntity* ent, const char* path)
|
||||
AutoLock auto_lock(&FdManager::fd_manager_lock);
|
||||
|
||||
for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ){
|
||||
if(iter->second == ent){
|
||||
fent.erase(iter++);
|
||||
|
||||
if(iter->second.get() == ent){
|
||||
std::string tmppath;
|
||||
FdManager::MakeRandomTempPath(path, tmppath);
|
||||
fent[tmppath] = ent;
|
||||
iter->second.reset(ent);
|
||||
break;
|
||||
}else{
|
||||
++iter;
|
||||
}
|
||||
@ -792,7 +818,7 @@ void FdManager::CleanupCacheDirInternal(const std::string &path)
|
||||
struct dirent* dent;
|
||||
std::string abs_path = cache_dir + "/" + S3fsCred::GetBucket() + path;
|
||||
|
||||
if(NULL == (dp = opendir(abs_path.c_str()))){
|
||||
if(nullptr == (dp = opendir(abs_path.c_str()))){
|
||||
S3FS_PRN_ERR("could not open cache dir(%s) - errno(%d)", abs_path.c_str(), errno);
|
||||
return;
|
||||
}
|
||||
@ -816,7 +842,7 @@ void FdManager::CleanupCacheDirInternal(const std::string &path)
|
||||
}else{
|
||||
AutoLock auto_lock(&FdManager::fd_manager_lock, AutoLock::NO_WAIT);
|
||||
if (!auto_lock.isLockAcquired()) {
|
||||
S3FS_PRN_ERR("could not get fd_manager_lock when clean up file(%s)", next_path.c_str());
|
||||
S3FS_PRN_INFO("could not get fd_manager_lock when cleaning up file(%s), then skip it.", next_path.c_str());
|
||||
continue;
|
||||
}
|
||||
fdent_map_t::iterator iter = fent.find(next_path);
|
||||
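This branch takes the manager lock with AutoLock::NO_WAIT and, when isLockAcquired() reports failure, skips the file instead of blocking the cleanup loop. A rough standalone sketch of the same non-blocking pattern built on pthread_mutex_trylock; manager_lock and try_cleanup_one are hypothetical names.

// Minimal sketch (illustrative): skip an item when the lock cannot be taken
// without blocking, instead of waiting for it.
#include <cstdio>
#include <pthread.h>

static pthread_mutex_t manager_lock = PTHREAD_MUTEX_INITIALIZER;

// Returns true only if the lock could be acquired and the work was done.
bool try_cleanup_one(const char* next_path)
{
    if(0 != pthread_mutex_trylock(&manager_lock)){
        std::printf("could not get lock when cleaning up file(%s), then skip it.\n", next_path);
        return false;    // skip this file, do not wait
    }
    // ... inspect or remove the cached file for next_path here ...
    pthread_mutex_unlock(&manager_lock);
    return true;
}

int main()
{
    return try_cleanup_one("/bucket/object") ? 0 : 1;
}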
@ -831,7 +857,7 @@ void FdManager::CleanupCacheDirInternal(const std::string &path)
|
||||
|
||||
bool FdManager::ReserveDiskSpace(off_t size)
|
||||
{
|
||||
if(IsSafeDiskSpace(NULL, size)){
|
||||
if(IsSafeDiskSpace(nullptr, size)){
|
||||
AutoLock auto_lock(&FdManager::reserved_diskspace_lock);
|
||||
free_disk_space += size;
|
||||
return true;
|
||||
@ -884,14 +910,14 @@ bool FdManager::RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const
|
||||
DIR* statsdir;
|
||||
std::string target_dir = cache_stat_top_dir;
|
||||
target_dir += sub_path;
|
||||
if(NULL == (statsdir = opendir(target_dir.c_str()))){
|
||||
if(nullptr == (statsdir = opendir(target_dir.c_str()))){
|
||||
S3FS_PRN_ERR("Could not open directory(%s) by errno(%d)", target_dir.c_str(), errno);
|
||||
return false;
|
||||
}
|
||||
|
||||
// loop in directory of cache file's stats
|
||||
struct dirent* pdirent = NULL;
|
||||
while(NULL != (pdirent = readdir(statsdir))){
|
||||
const struct dirent* pdirent = nullptr;
|
||||
while(nullptr != (pdirent = readdir(statsdir))){
|
||||
if(DT_DIR == pdirent->d_type){
|
||||
// found directory
|
||||
if(0 == strcmp(pdirent->d_name, ".") || 0 == strcmp(pdirent->d_name, "..")){
|
||||
@ -943,6 +969,7 @@ bool FdManager::RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const
|
||||
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD, "Could not open cache file");
|
||||
continue;
|
||||
}
|
||||
scope_guard guard([&]() { close(cache_file_fd); });
|
||||
|
||||
// get inode number for cache file
|
||||
struct stat st;
|
||||
@ -951,7 +978,6 @@ bool FdManager::RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const
|
||||
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str());
|
||||
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD, "Could not get file inode number for cache file");
|
||||
|
||||
close(cache_file_fd);
|
||||
continue;
|
||||
}
|
||||
ino_t cache_file_inode = st.st_ino;
|
||||
@ -964,7 +990,6 @@ bool FdManager::RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const
|
||||
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str());
|
||||
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD, "Could not load cache file stats information");
|
||||
|
||||
close(cache_file_fd);
|
||||
continue;
|
||||
}
|
||||
cfstat.Release();
|
||||
@ -975,7 +1000,6 @@ bool FdManager::RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const
|
||||
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_FILE_PROB, object_file_path.c_str(), strOpenedWarn.c_str());
|
||||
S3FS_PRN_CACHE(fp, CACHEDBG_FMT_CRIT_HEAD2 "The cache file size(%lld) and the value(%lld) from cache file stats are different", static_cast<long long int>(st.st_size), static_cast<long long int>(pagelist.Size()));
|
||||
|
||||
close(cache_file_fd);
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -1007,7 +1031,6 @@ bool FdManager::RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const
|
||||
}
|
||||
err_area_list.clear();
|
||||
warn_area_list.clear();
|
||||
close(cache_file_fd);
|
||||
}
|
||||
}
|
||||
closedir(statsdir);
|
||||
@ -1026,7 +1049,7 @@ bool FdManager::CheckAllCache()
|
||||
if(FdManager::check_cache_output.empty()){
|
||||
fp = stdout;
|
||||
}else{
|
||||
if(NULL == (fp = fopen(FdManager::check_cache_output.c_str(), "a+"))){
|
||||
if(nullptr == (fp = fopen(FdManager::check_cache_output.c_str(), "a+"))){
|
||||
S3FS_PRN_ERR("Could not open(create) output file(%s) for checking all cache by errno(%d)", FdManager::check_cache_output.c_str(), errno);
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -47,7 +47,9 @@ class FdManager
|
||||
|
||||
private:
|
||||
static off_t GetFreeDiskSpace(const char* path);
|
||||
static off_t GetTotalDiskSpace(const char* path);
|
||||
static bool IsDir(const std::string* dir);
|
||||
static int GetVfsStat(const char* path, struct statvfs* vfsbuf);
|
||||
|
||||
int GetPseudoFdCount(const char* path);
|
||||
void CleanupCacheDirInternal(const std::string &path = "");
|
||||
@ -78,16 +80,18 @@ class FdManager
|
||||
static off_t SetEnsureFreeDiskSpace(off_t size);
|
||||
static bool InitFakeUsedDiskSize(off_t fake_freesize);
|
||||
static bool IsSafeDiskSpace(const char* path, off_t size);
|
||||
static bool IsSafeDiskSpaceWithLog(const char* path, off_t size);
|
||||
static void FreeReservedDiskSpace(off_t size);
|
||||
static bool ReserveDiskSpace(off_t size);
|
||||
static bool HaveLseekHole();
|
||||
static bool SetTmpDir(const char* dir);
|
||||
static bool CheckTmpDirExist();
|
||||
static FILE* MakeTempFile();
|
||||
static off_t GetTotalDiskSpaceByRatio(int ratio);
|
||||
|
||||
// Return FdEntity associated with path, returning NULL on error. This operation increments the reference count; callers must decrement via Close after use.
|
||||
FdEntity* GetFdEntity(const char* path, int& existfd, bool newfd = true, bool lock_already_held = false);
|
||||
FdEntity* Open(int& fd, const char* path, headers_t* pmeta, off_t size, time_t time, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type);
|
||||
// Return FdEntity associated with path, returning nullptr on error. This operation increments the reference count; callers must decrement via Close after use.
|
||||
FdEntity* GetFdEntity(const char* path, int& existfd, bool newfd = true, AutoLock::Type locktype = AutoLock::NONE);
|
||||
FdEntity* Open(int& fd, const char* path, const headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type);
|
||||
FdEntity* GetExistFdEntity(const char* path, int existfd = -1);
|
||||
FdEntity* OpenExistFdEntity(const char* path, int& fd, int flags = O_RDONLY);
|
||||
void Rename(const std::string &from, const std::string &to);
|
||||
|
||||
@ -19,38 +19,18 @@
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_logger.h"
|
||||
#include "fdcache_auto.h"
|
||||
#include "fdcache.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// AutoFdEntity methods
|
||||
//------------------------------------------------
|
||||
AutoFdEntity::AutoFdEntity() : pFdEntity(NULL), pseudo_fd(-1)
|
||||
AutoFdEntity::AutoFdEntity() : pFdEntity(nullptr), pseudo_fd(-1)
|
||||
{
|
||||
}
|
||||
|
||||
// [NOTE]
// The copy constructor should not be called, so it is a private method.
// Even if it were called, the consistency of the reference count could
// be maintained, but this case is not assumed.
//
|
||||
AutoFdEntity::AutoFdEntity(AutoFdEntity& other) : pFdEntity(NULL), pseudo_fd(-1)
|
||||
{
|
||||
S3FS_PRN_WARN("This method should not be called. Please check the caller.");
|
||||
|
||||
if(other.pFdEntity){
|
||||
if(-1 != (pseudo_fd = other.pFdEntity->Dup(other.pseudo_fd))){
|
||||
pFdEntity = other.pFdEntity;
|
||||
}else{
|
||||
S3FS_PRN_ERR("Failed duplicating fd in AutoFdEntity.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
AutoFdEntity::~AutoFdEntity()
|
||||
{
|
||||
Close();
|
||||
@ -63,7 +43,7 @@ bool AutoFdEntity::Close()
|
||||
S3FS_PRN_ERR("Failed to close fdentity.");
|
||||
return false;
|
||||
}
|
||||
pFdEntity = NULL;
|
||||
pFdEntity = nullptr;
|
||||
pseudo_fd = -1;
|
||||
}
|
||||
return true;
|
||||
@ -81,30 +61,33 @@ int AutoFdEntity::Detach()
|
||||
}
|
||||
int fd = pseudo_fd;
|
||||
pseudo_fd = -1;
|
||||
pFdEntity = NULL;
|
||||
pFdEntity = nullptr;
|
||||
|
||||
return fd;
|
||||
}
|
||||
|
||||
bool AutoFdEntity::Attach(const char* path, int existfd)
|
||||
FdEntity* AutoFdEntity::Attach(const char* path, int existfd)
|
||||
{
|
||||
Close();
|
||||
|
||||
if(NULL == (pFdEntity = FdManager::get()->GetFdEntity(path, existfd, false))){
|
||||
if(nullptr == (pFdEntity = FdManager::get()->GetFdEntity(path, existfd, false))){
|
||||
S3FS_PRN_DBG("Could not find fd entity object(file=%s, pseudo_fd=%d)", path, existfd);
|
||||
return false;
|
||||
return nullptr;
|
||||
}
|
||||
pseudo_fd = existfd;
|
||||
return true;
|
||||
return pFdEntity;
|
||||
}
|
||||
|
||||
FdEntity* AutoFdEntity::Open(const char* path, headers_t* pmeta, off_t size, time_t time, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type)
|
||||
FdEntity* AutoFdEntity::Open(const char* path, const headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type, int* error)
|
||||
{
|
||||
Close();
|
||||
|
||||
if(NULL == (pFdEntity = FdManager::get()->Open(pseudo_fd, path, pmeta, size, time, flags, force_tmpfile, is_create, ignore_modify, type))){
|
||||
if(nullptr == (pFdEntity = FdManager::get()->Open(pseudo_fd, path, pmeta, size, ts_mctime, flags, force_tmpfile, is_create, ignore_modify, type))){
|
||||
if(error){
|
||||
*error = pseudo_fd;
|
||||
}
|
||||
pseudo_fd = -1;
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
return pFdEntity;
|
||||
}
|
||||
@ -117,8 +100,8 @@ FdEntity* AutoFdEntity::GetExistFdEntity(const char* path, int existfd)
|
||||
Close();
|
||||
|
||||
FdEntity* ent;
|
||||
if(NULL == (ent = FdManager::get()->GetExistFdEntity(path, existfd))){
|
||||
return NULL;
|
||||
if(nullptr == (ent = FdManager::get()->GetExistFdEntity(path, existfd))){
|
||||
return nullptr;
|
||||
}
|
||||
return ent;
|
||||
}
|
||||
@ -127,34 +110,12 @@ FdEntity* AutoFdEntity::OpenExistFdEntity(const char* path, int flags)
|
||||
{
|
||||
Close();
|
||||
|
||||
if(NULL == (pFdEntity = FdManager::get()->OpenExistFdEntity(path, pseudo_fd, flags))){
|
||||
return NULL;
|
||||
if(nullptr == (pFdEntity = FdManager::get()->OpenExistFdEntity(path, pseudo_fd, flags))){
|
||||
return nullptr;
|
||||
}
|
||||
return pFdEntity;
|
||||
}
|
||||
|
||||
// [NOTE]
// This operator should not be called, so it is a private method.
// Even if it were called, the consistency of the reference count could
// be maintained, but this case is not assumed.
//
|
||||
bool AutoFdEntity::operator=(AutoFdEntity& other)
|
||||
{
|
||||
S3FS_PRN_WARN("This method should not be called. Please check the caller.");
|
||||
|
||||
Close();
|
||||
|
||||
if(other.pFdEntity){
|
||||
if(-1 != (pseudo_fd = other.pFdEntity->Dup(other.pseudo_fd))){
|
||||
pFdEntity = other.pFdEntity;
|
||||
}else{
|
||||
S3FS_PRN_ERR("Failed duplicating fd in AutoFdEntity.");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
|
||||
@ -21,8 +21,12 @@
|
||||
#ifndef S3FS_FDCACHE_AUTO_H_
|
||||
#define S3FS_FDCACHE_AUTO_H_
|
||||
|
||||
#include <fcntl.h>
|
||||
|
||||
#include "autolock.h"
|
||||
#include "fdcache_entity.h"
|
||||
#include "metaheader.h"
|
||||
|
||||
class FdEntity;
|
||||
|
||||
//------------------------------------------------
|
||||
// class AutoFdEntity
|
||||
@ -39,8 +43,10 @@ class AutoFdEntity
|
||||
int pseudo_fd;
|
||||
|
||||
private:
|
||||
AutoFdEntity(AutoFdEntity& other);
|
||||
bool operator=(AutoFdEntity& other);
|
||||
AutoFdEntity(const AutoFdEntity&) = delete;
|
||||
AutoFdEntity(AutoFdEntity&&) = delete;
|
||||
AutoFdEntity& operator=(const AutoFdEntity&) = delete;
|
||||
AutoFdEntity& operator=(AutoFdEntity&&) = delete;
|
||||
|
||||
public:
|
||||
AutoFdEntity();
|
||||
@ -48,10 +54,10 @@ class AutoFdEntity
|
||||
|
||||
bool Close();
|
||||
int Detach();
|
||||
bool Attach(const char* path, int existfd);
|
||||
FdEntity* Attach(const char* path, int existfd);
|
||||
int GetPseudoFd() const { return pseudo_fd; }
|
||||
|
||||
FdEntity* Open(const char* path, headers_t* pmeta, off_t size, time_t time, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type);
|
||||
FdEntity* Open(const char* path, const headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, AutoLock::Type type, int* error = nullptr);
|
||||
FdEntity* GetExistFdEntity(const char* path, int existfd = -1);
|
||||
FdEntity* OpenExistFdEntity(const char* path, int flags = O_RDONLY);
|
||||
};
|
||||
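AutoFdEntity now deletes its copy and move operations outright instead of hiding a private copy constructor and operator=, which makes the RAII wrapper explicitly non-copyable and non-movable. A minimal sketch of that shape applied to an unrelated resource; AutoHandle is illustrative, not the s3fs class.

// Minimal sketch (illustrative, not the s3fs class): an RAII holder with
// deleted copy/move operations that releases its handle in the destructor.
#include <cstdio>

class AutoHandle
{
    private:
        std::FILE* fp;

    public:
        explicit AutoHandle(const char* path) : fp(std::fopen(path, "r")) {}
        ~AutoHandle() { Close(); }

        AutoHandle(const AutoHandle&) = delete;
        AutoHandle(AutoHandle&&) = delete;
        AutoHandle& operator=(const AutoHandle&) = delete;
        AutoHandle& operator=(AutoHandle&&) = delete;

        bool IsOpen() const { return nullptr != fp; }
        void Close()
        {
            if(fp){
                std::fclose(fp);
                fp = nullptr;
            }
        }
};

int main()
{
    AutoHandle handle("/etc/hostname");    // hypothetical input file
    return handle.IsOpen() ? 0 : 1;        // closed automatically either way
}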
|
||||
File diff suppressed because it is too large
@ -21,9 +21,13 @@
|
||||
#ifndef S3FS_FDCACHE_ENTITY_H_
|
||||
#define S3FS_FDCACHE_ENTITY_H_
|
||||
|
||||
#include <fcntl.h>
|
||||
#include <memory>
|
||||
|
||||
#include "autolock.h"
|
||||
#include "fdcache_page.h"
|
||||
#include "fdcache_fdinfo.h"
|
||||
#include "fdcache_untreated.h"
|
||||
#include "metaheader.h"
|
||||
|
||||
//------------------------------------------------
|
||||
@ -32,24 +36,37 @@
|
||||
class FdEntity
|
||||
{
|
||||
private:
|
||||
static bool mixmultipart; // whether multipart uploading can use copy api.
|
||||
// [NOTE]
|
||||
// Distinguish between meta pending and new file creation pending,
|
||||
// because the processing(request) at these updates is different.
|
||||
// Therefore, the pending state is expressed by this enum type.
|
||||
//
|
||||
enum class pending_status_t {
|
||||
NO_UPDATE_PENDING = 0,
|
||||
UPDATE_META_PENDING, // pending meta header
|
||||
CREATE_FILE_PENDING // pending file creation and meta header
|
||||
};
|
||||
|
||||
pthread_mutex_t fdent_lock;
|
||||
static bool mixmultipart; // whether multipart uploading can use copy api.
|
||||
static bool streamupload; // whether stream uploading.
|
||||
|
||||
mutable pthread_mutex_t fdent_lock;
|
||||
bool is_lock_init;
|
||||
std::string path; // object path
|
||||
int physical_fd; // physical file(cache or temporary file) descriptor
|
||||
UntreatedParts untreated_list; // list of untreated parts that have been written and not yet uploaded(for streamupload)
|
||||
fdinfo_map_t pseudo_fd_map; // pseudo file descriptor information map
|
||||
FILE* pfile; // file pointer(tmp file or cache file)
|
||||
ino_t inode; // inode number for cache file
|
||||
headers_t orgmeta; // original headers at opening
|
||||
off_t size_orgmeta; // original file size in original headers
|
||||
|
||||
pthread_mutex_t fdent_data_lock;// protects the following members
|
||||
mutable pthread_mutex_t fdent_data_lock;// protects the following members
|
||||
PageList pagelist;
|
||||
std::string cachepath; // local cache file path
|
||||
// (if this is empty, does not load/save pagelist.)
|
||||
std::string mirrorpath; // mirror file path to local cache file path
|
||||
bool is_meta_pending;
|
||||
pending_status_t pending_status;// status for new file creation and meta update
|
||||
struct timespec holding_mtime; // if mtime is updated while the file is open, it is set time_t value
|
||||
|
||||
private:
|
||||
@ -57,57 +74,71 @@ class FdEntity
|
||||
static ino_t GetInode(int fd);
|
||||
|
||||
void Clear();
|
||||
ino_t GetInode();
|
||||
ino_t GetInode() const;
|
||||
int OpenMirrorFile();
|
||||
int NoCacheLoadAndPost(PseudoFdInfo* pseudo_obj, off_t start = 0, off_t size = 0); // size=0 means loading to end
|
||||
PseudoFdInfo* CheckPseudoFdFlags(int fd, bool writable, bool lock_already_held = false);
|
||||
bool IsUploading(bool lock_already_held = false);
|
||||
PseudoFdInfo* CheckPseudoFdFlags(int fd, bool writable, AutoLock::Type locktype = AutoLock::NONE);
|
||||
bool IsUploading(AutoLock::Type locktype = AutoLock::NONE);
|
||||
bool SetAllStatus(bool is_loaded); // [NOTE] not locking
|
||||
bool SetAllStatusUnloaded() { return SetAllStatus(false); }
|
||||
int NoCachePreMultipartPost(PseudoFdInfo* pseudo_obj);
|
||||
int NoCacheMultipartPost(PseudoFdInfo* pseudo_obj, int tgfd, off_t start, off_t size);
|
||||
int NoCacheCompleteMultipartPost(PseudoFdInfo* pseudo_obj);
|
||||
int RowFlushNoMultipart(PseudoFdInfo* pseudo_obj, const char* tpath);
|
||||
int RowFlushNoMultipart(const PseudoFdInfo* pseudo_obj, const char* tpath);
|
||||
int RowFlushMultipart(PseudoFdInfo* pseudo_obj, const char* tpath);
|
||||
int RowFlushMixMultipart(PseudoFdInfo* pseudo_obj, const char* tpath);
|
||||
ssize_t WriteNoMultipart(PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size);
|
||||
int RowFlushStreamMultipart(PseudoFdInfo* pseudo_obj, const char* tpath);
|
||||
ssize_t WriteNoMultipart(const PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size);
|
||||
ssize_t WriteMultipart(PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size);
|
||||
ssize_t WriteMixMultipart(PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size);
|
||||
int UploadPendingMeta();
|
||||
ssize_t WriteStreamUpload(PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size);
|
||||
|
||||
bool ReserveDiskSpace(off_t size);
|
||||
|
||||
bool AddUntreated(off_t start, off_t size);
|
||||
|
||||
bool IsDirtyMetadata() const;
|
||||
|
||||
public:
|
||||
static bool GetNoMixMultipart() { return mixmultipart; }
|
||||
static bool SetNoMixMultipart();
|
||||
static bool GetStreamUpload() { return streamupload; }
|
||||
static bool SetStreamUpload(bool isstream);
|
||||
|
||||
explicit FdEntity(const char* tpath = NULL, const char* cpath = NULL);
|
||||
explicit FdEntity(const char* tpath = nullptr, const char* cpath = nullptr);
|
||||
~FdEntity();
|
||||
FdEntity(const FdEntity&) = delete;
|
||||
FdEntity(FdEntity&&) = delete;
|
||||
FdEntity& operator=(const FdEntity&) = delete;
|
||||
FdEntity& operator=(FdEntity&&) = delete;
|
||||
|
||||
void Close(int fd);
|
||||
bool IsOpen() const { return (-1 != physical_fd); }
|
||||
bool FindPseudoFd(int fd, bool lock_already_held = false);
|
||||
int Open(const headers_t* pmeta, off_t size, time_t time, int flags, AutoLock::Type type);
|
||||
bool LoadAll(int fd, headers_t* pmeta = NULL, off_t* size = NULL, bool force_load = false);
|
||||
int Dup(int fd, bool lock_already_held = false);
|
||||
int OpenPseudoFd(int flags = O_RDONLY, bool lock_already_held = false);
|
||||
int GetOpenCount(bool lock_already_held = false);
|
||||
const char* GetPath() const { return path.c_str(); }
|
||||
bool FindPseudoFd(int fd, AutoLock::Type locktype = AutoLock::NONE) const;
|
||||
int Open(const headers_t* pmeta, off_t size, const struct timespec& ts_mctime, int flags, AutoLock::Type type);
|
||||
bool LoadAll(int fd, headers_t* pmeta = nullptr, off_t* size = nullptr, bool force_load = false);
|
||||
int Dup(int fd, AutoLock::Type locktype = AutoLock::NONE);
|
||||
int OpenPseudoFd(int flags = O_RDONLY, AutoLock::Type locktype = AutoLock::NONE);
|
||||
int GetOpenCount(AutoLock::Type locktype = AutoLock::NONE) const;
|
||||
const std::string& GetPath() const { return path; }
|
||||
bool RenamePath(const std::string& newpath, std::string& fentmapkey);
|
||||
int GetPhysicalFd() const { return physical_fd; }
|
||||
bool IsModified() const;
|
||||
bool MergeOrgMeta(headers_t& updatemeta);
|
||||
int UploadPending(int fd, AutoLock::Type type);
|
||||
|
||||
bool GetStats(struct stat& st, bool lock_already_held = false);
|
||||
int SetCtime(struct timespec time, bool lock_already_held = false);
|
||||
int SetAtime(struct timespec time, bool lock_already_held = false);
|
||||
int SetMCtime(struct timespec mtime, struct timespec ctime, bool lock_already_held = false);
|
||||
bool GetStats(struct stat& st, AutoLock::Type locktype = AutoLock::NONE) const;
|
||||
int SetCtime(struct timespec time, AutoLock::Type locktype = AutoLock::NONE);
|
||||
int SetAtime(struct timespec time, AutoLock::Type locktype = AutoLock::NONE);
|
||||
int SetMCtime(struct timespec mtime, struct timespec ctime, AutoLock::Type locktype = AutoLock::NONE);
|
||||
bool UpdateCtime();
|
||||
bool UpdateAtime();
|
||||
bool UpdateMtime(bool clear_holding_mtime = false);
|
||||
bool UpdateMCtime();
|
||||
bool SetHoldingMtime(struct timespec mtime, bool lock_already_held = false);
|
||||
bool ClearHoldingMtime(bool lock_already_held = false);
|
||||
bool GetSize(off_t& size);
|
||||
bool GetXattr(std::string& xattr);
|
||||
bool SetHoldingMtime(struct timespec mtime, AutoLock::Type locktype = AutoLock::NONE);
|
||||
bool ClearHoldingMtime(AutoLock::Type locktype = AutoLock::NONE);
|
||||
bool GetSize(off_t& size) const;
|
||||
bool GetXattr(std::string& xattr) const;
|
||||
bool SetXattr(const std::string& xattr);
|
||||
bool SetMode(mode_t mode);
|
||||
bool SetUId(uid_t uid);
|
||||
@ -117,19 +148,23 @@ class FdEntity
|
||||
int Load(off_t start, off_t size, AutoLock::Type type, bool is_modified_flag = false); // size=0 means loading to end
|
||||
|
||||
off_t BytesModified();
|
||||
int RowFlush(int fd, const char* tpath, bool force_sync = false);
|
||||
int Flush(int fd, bool force_sync = false) { return RowFlush(fd, NULL, force_sync); }
|
||||
int RowFlush(int fd, const char* tpath, AutoLock::Type type, bool force_sync = false);
|
||||
int Flush(int fd, AutoLock::Type type, bool force_sync = false) { return RowFlush(fd, nullptr, type, force_sync); }
|
||||
|
||||
ssize_t Read(int fd, char* bytes, off_t start, size_t size, bool force_load = false);
|
||||
ssize_t Write(int fd, const char* bytes, off_t start, size_t size);
|
||||
|
||||
bool ReserveDiskSpace(off_t size);
|
||||
bool PunchHole(off_t start = 0, size_t size = 0);
|
||||
|
||||
void MarkDirtyNewFile();
|
||||
bool IsDirtyNewFile() const;
|
||||
void MarkDirtyMetadata();
|
||||
|
||||
bool GetLastUpdateUntreatedPart(off_t& start, off_t& size) const;
|
||||
bool ReplaceLastUpdateUntreatedPart(off_t front_start, off_t front_size, off_t behind_start, off_t behind_size);
|
||||
};
|
||||
|
||||
typedef std::map<std::string, class FdEntity*> fdent_map_t; // key=path, value=FdEntity*
|
||||
typedef std::map<std::string, std::unique_ptr<FdEntity>> fdent_map_t; // key=path, value=FdEntity*
|
||||
|
||||
#endif // S3FS_FDCACHE_ENTITY_H_
|
||||
|
||||
|
||||
@ -18,20 +18,94 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <algorithm>
|
||||
#include <cerrno>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <algorithm>
|
||||
#include <memory>
|
||||
#include <sys/stat.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_logger.h"
|
||||
#include "s3fs_util.h"
|
||||
#include "fdcache_fdinfo.h"
|
||||
#include "fdcache_pseudofd.h"
|
||||
#include "autolock.h"
|
||||
#include "fdcache_entity.h"
|
||||
#include "curl.h"
|
||||
#include "string_util.h"
|
||||
#include "threadpoolman.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// PseudoFdInfo class variables
|
||||
//------------------------------------------------
|
||||
int PseudoFdInfo::max_threads = -1;
|
||||
int PseudoFdInfo::opt_max_threads = -1;
|
||||
|
||||
//------------------------------------------------
|
||||
// PseudoFdInfo class methods
|
||||
//------------------------------------------------
|
||||
//
|
||||
// Worker function for uploading
|
||||
//
|
||||
void* PseudoFdInfo::MultipartUploadThreadWorker(void* arg)
|
||||
{
|
||||
std::unique_ptr<pseudofdinfo_thparam> pthparam(static_cast<pseudofdinfo_thparam*>(arg));
|
||||
if(!pthparam || !(pthparam->ppseudofdinfo)){
|
||||
return reinterpret_cast<void*>(-EIO);
|
||||
}
|
||||
S3FS_PRN_INFO3("Upload Part Thread [tpath=%s][start=%lld][size=%lld][part=%d]", pthparam->path.c_str(), static_cast<long long>(pthparam->start), static_cast<long long>(pthparam->size), pthparam->part_num);
|
||||
|
||||
int result;
|
||||
{
|
||||
AutoLock auto_lock(&(pthparam->ppseudofdinfo->upload_list_lock));
|
||||
|
||||
if(0 != (result = pthparam->ppseudofdinfo->last_result)){
|
||||
S3FS_PRN_DBG("Already occurred error, thus this thread worker is exiting.");
|
||||
|
||||
if(!pthparam->ppseudofdinfo->CompleteInstruction(result, AutoLock::ALREADY_LOCKED)){ // result will be overwritten with the same value.
|
||||
result = -EIO;
|
||||
}
|
||||
return reinterpret_cast<void*>(result);
|
||||
}
|
||||
}
|
||||
|
||||
// setup and make curl object
|
||||
std::unique_ptr<S3fsCurl> s3fscurl(S3fsCurl::CreateParallelS3fsCurl(pthparam->path.c_str(), pthparam->upload_fd, pthparam->start, pthparam->size, pthparam->part_num, pthparam->is_copy, pthparam->petag, pthparam->upload_id, result));
|
||||
if(nullptr == s3fscurl){
|
||||
S3FS_PRN_ERR("failed creating s3fs curl object for uploading [path=%s][start=%lld][size=%lld][part=%d]", pthparam->path.c_str(), static_cast<long long>(pthparam->start), static_cast<long long>(pthparam->size), pthparam->part_num);
|
||||
|
||||
// set result for exiting
|
||||
if(!pthparam->ppseudofdinfo->CompleteInstruction(result, AutoLock::NONE)){
|
||||
result = -EIO;
|
||||
}
|
||||
return reinterpret_cast<void*>(result);
|
||||
}
|
||||
|
||||
// Send request and get result
|
||||
if(0 == (result = s3fscurl->RequestPerform())){
|
||||
S3FS_PRN_DBG("succeed uploading [path=%s][start=%lld][size=%lld][part=%d]", pthparam->path.c_str(), static_cast<long long>(pthparam->start), static_cast<long long>(pthparam->size), pthparam->part_num);
|
||||
if(!s3fscurl->MixMultipartPostComplete()){
|
||||
S3FS_PRN_ERR("failed completion uploading [path=%s][start=%lld][size=%lld][part=%d]", pthparam->path.c_str(), static_cast<long long>(pthparam->start), static_cast<long long>(pthparam->size), pthparam->part_num);
|
||||
result = -EIO;
|
||||
}
|
||||
}else{
|
||||
S3FS_PRN_ERR("failed uploading with error(%d) [path=%s][start=%lld][size=%lld][part=%d]", result, pthparam->path.c_str(), static_cast<long long>(pthparam->start), static_cast<long long>(pthparam->size), pthparam->part_num);
|
||||
}
|
||||
s3fscurl->DestroyCurlHandle(true, false);
|
||||
|
||||
// set result
|
||||
if(!pthparam->ppseudofdinfo->CompleteInstruction(result, AutoLock::NONE)){
|
||||
S3FS_PRN_WARN("This thread worker is about to end, so it doesn't return an EIO here and runs to the end.");
|
||||
}
|
||||
|
||||
return reinterpret_cast<void*>(result);
|
||||
}
|
||||
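The worker receives its parameter block as a bare void* from the thread pool and immediately wraps it in std::unique_ptr, so the heap allocation is released on every return path. A compact standalone sketch of that hand-off; worker_param and WorkerFunc are hypothetical.

// Minimal sketch (illustrative): a pthread worker that takes ownership of
// its heap-allocated argument with std::unique_ptr.
#include <cstdio>
#include <memory>
#include <pthread.h>
#include <string>

struct worker_param {
    std::string path;
    long long   start;
    long long   size;
};

static void* WorkerFunc(void* arg)
{
    // Take ownership right away: the block is freed on every return path.
    std::unique_ptr<worker_param> param(static_cast<worker_param*>(arg));
    if(!param){
        return nullptr;
    }
    std::printf("upload [path=%s][start=%lld][size=%lld]\n", param->path.c_str(), param->start, param->size);
    return nullptr;
}

int main()
{
    worker_param* param = new worker_param{"/bucket/object", 0, 10LL * 1024 * 1024};
    pthread_t thread;
    if(0 != pthread_create(&thread, nullptr, WorkerFunc, param)){
        delete param;    // creation failed, so we still own the block
        return 1;
    }
    pthread_join(thread, nullptr);
    return 0;
}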
|
||||
//------------------------------------------------
|
||||
// PseudoFdInfo methods
|
||||
//------------------------------------------------
|
||||
PseudoFdInfo::PseudoFdInfo(int fd, int open_flags) : pseudo_fd(-1), physical_fd(fd), flags(0) //, is_lock_init(false)
|
||||
PseudoFdInfo::PseudoFdInfo(int fd, int open_flags) : pseudo_fd(-1), physical_fd(fd), flags(0), upload_fd(-1), uploaded_sem(0), instruct_count(0), completed_count(0), last_result(0)
|
||||
{
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
@ -53,6 +127,8 @@ PseudoFdInfo::PseudoFdInfo(int fd, int open_flags) : pseudo_fd(-1), physical_fd(
|
||||
|
||||
PseudoFdInfo::~PseudoFdInfo()
|
||||
{
|
||||
Clear(); // call before destroying the mutex
|
||||
|
||||
if(is_lock_init){
|
||||
int result;
|
||||
if(0 != (result = pthread_mutex_destroy(&upload_list_lock))){
|
||||
@ -61,11 +137,17 @@ PseudoFdInfo::~PseudoFdInfo()
|
||||
}
|
||||
is_lock_init = false;
|
||||
}
|
||||
Clear();
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::Clear()
|
||||
{
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(!CancelAllThreads() || !ResetUploadInfo(AutoLock::NONE)){
|
||||
return false;
|
||||
}
|
||||
CloseUploadFd();
|
||||
|
||||
if(-1 != pseudo_fd){
|
||||
PseudoFdManager::Release(pseudo_fd);
|
||||
}
|
||||
@ -75,6 +157,51 @@ bool PseudoFdInfo::Clear()
|
||||
return true;
|
||||
}
|
||||
|
||||
void PseudoFdInfo::CloseUploadFd()
|
||||
{
|
||||
AutoLock auto_lock(&upload_list_lock);
|
||||
|
||||
if(-1 != upload_fd){
|
||||
close(upload_fd);
|
||||
}
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::OpenUploadFd(AutoLock::Type type)
|
||||
{
|
||||
AutoLock auto_lock(&upload_list_lock, type);
|
||||
|
||||
if(-1 != upload_fd){
|
||||
// already initialized
|
||||
return true;
|
||||
}
|
||||
if(-1 == physical_fd){
|
||||
S3FS_PRN_ERR("physical_fd is not initialized yet.");
|
||||
return false;
|
||||
}
|
||||
|
||||
// duplicate fd
|
||||
int fd;
|
||||
if(-1 == (fd = dup(physical_fd))){
|
||||
S3FS_PRN_ERR("Could not duplicate physical file descriptor(errno=%d)", errno);
|
||||
return false;
|
||||
}
|
||||
scope_guard guard([&]() { close(fd); });
|
||||
|
||||
if(0 != lseek(fd, 0, SEEK_SET)){
|
||||
S3FS_PRN_ERR("Could not seek physical file descriptor(errno=%d)", errno);
|
||||
return false;
|
||||
}
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
S3FS_PRN_ERR("Invalid file descriptor for uploading(errno=%d)", errno);
|
||||
return false;
|
||||
}
|
||||
|
||||
guard.dismiss();
|
||||
upload_fd = fd;
|
||||
return true;
|
||||
}
|
||||
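OpenUploadFd protects the duplicated descriptor with a scope_guard that closes it on any early return and is dismissed once ownership passes to upload_fd. The guard's implementation is not part of this diff; a minimal guard with a dismiss() switch might look like the sketch below, which is an assumption rather than the actual s3fs helper.

// Minimal sketch (assumed shape, not necessarily the s3fs helper): a scope
// guard that runs a callback at scope exit unless dismiss() was called.
#include <fcntl.h>
#include <functional>
#include <unistd.h>

class scope_guard_sketch
{
    private:
        std::function<void()> func;
        bool                  active;

    public:
        explicit scope_guard_sketch(std::function<void()> f) : func(std::move(f)), active(true) {}
        ~scope_guard_sketch() { if(active){ func(); } }
        void dismiss() { active = false; }

        scope_guard_sketch(const scope_guard_sketch&) = delete;
        scope_guard_sketch& operator=(const scope_guard_sketch&) = delete;
};

// Duplicate a descriptor and rewind it; an early return lets the guard close
// the duplicate, while success hands the descriptor to the caller.
int dup_and_rewind(int physical_fd)
{
    int fd = dup(physical_fd);
    if(-1 == fd){
        return -1;
    }
    scope_guard_sketch guard([fd]() { close(fd); });

    if(0 != lseek(fd, 0, SEEK_SET)){
        return -1;           // guard closes fd here
    }
    guard.dismiss();         // success: the caller now owns fd
    return fd;
}

int main()
{
    int fd = open("/dev/null", O_RDONLY);
    int dupfd = dup_and_rewind(fd);
    if(-1 != dupfd){
        close(dupfd);
    }
    close(fd);
    return 0;
}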
|
||||
bool PseudoFdInfo::Set(int fd, int open_flags)
|
||||
{
|
||||
if(-1 == fd){
|
||||
@ -108,37 +235,75 @@ bool PseudoFdInfo::Readable() const
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::ClearUploadInfo(bool is_cancel_mp, bool lock_already_held)
|
||||
bool PseudoFdInfo::ClearUploadInfo(bool is_cancel_mp)
|
||||
{
|
||||
AutoLock auto_lock(&upload_list_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE);
|
||||
|
||||
if(is_cancel_mp){
|
||||
// [TODO]
|
||||
// If we have any uploaded parts, we should delete them here.
|
||||
// We haven't implemented it yet, but it will be implemented in the future.
|
||||
// (User can delete them in the utility mode of s3fs.)
|
||||
//
|
||||
S3FS_PRN_INFO("Implementation of cancellation process for multipart upload is awaited.");
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(!CancelAllThreads()){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return ResetUploadInfo(AutoLock::NONE);
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::ResetUploadInfo(AutoLock::Type type)
|
||||
{
|
||||
AutoLock auto_lock(&upload_list_lock, type);
|
||||
|
||||
upload_id.erase();
|
||||
upload_list.clear();
|
||||
ClearUntreated(true);
|
||||
instruct_count = 0;
|
||||
completed_count = 0;
|
||||
last_result = 0;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::InitialUploadInfo(const std::string& id)
|
||||
bool PseudoFdInfo::RowInitialUploadInfo(const std::string& id, bool is_cancel_mp, AutoLock::Type type)
|
||||
{
|
||||
AutoLock auto_lock(&upload_list_lock);
|
||||
|
||||
if(!ClearUploadInfo(true, true)){
|
||||
if(is_cancel_mp && AutoLock::ALREADY_LOCKED == type){
|
||||
S3FS_PRN_ERR("Internal Error: Could not call this with type=AutoLock::ALREADY_LOCKED and is_cancel_mp=true");
|
||||
return false;
|
||||
}
|
||||
|
||||
if(is_cancel_mp){
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(!ClearUploadInfo(is_cancel_mp)){
|
||||
return false;
|
||||
}
|
||||
}else{
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(!ResetUploadInfo(type)){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
AutoLock auto_lock(&upload_list_lock, type);
|
||||
upload_id = id;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::CompleteInstruction(int result, AutoLock::Type type)
|
||||
{
|
||||
AutoLock auto_lock(&upload_list_lock, type);
|
||||
|
||||
if(0 != result){
|
||||
last_result = result;
|
||||
}
|
||||
|
||||
if(0 >= instruct_count){
|
||||
S3FS_PRN_ERR("Internal error: instruct_count caused an underflow.");
|
||||
return false;
|
||||
}
|
||||
--instruct_count;
|
||||
++completed_count;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::GetUploadId(std::string& id) const
|
||||
{
|
||||
if(!IsUploading()){
|
||||
@ -149,7 +314,7 @@ bool PseudoFdInfo::GetUploadId(std::string& id) const
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::GetEtaglist(etaglist_t& list)
|
||||
bool PseudoFdInfo::GetEtaglist(etaglist_t& list) const
|
||||
{
|
||||
if(!IsUploading()){
|
||||
S3FS_PRN_ERR("Multipart Upload has not started yet.");
|
||||
@ -198,52 +363,680 @@ bool PseudoFdInfo::AppendUploadPart(off_t start, off_t size, bool is_copy, etagp
|
||||
int partnumber = static_cast<int>(upload_list.size()) + 1;
|
||||
|
||||
// add new part
|
||||
etag_entities.push_back(etagpair(NULL, partnumber)); // [NOTE] Create the etag entity and register it in the list.
|
||||
etagpair& etag_entity = etag_entities.back();
|
||||
filepart newpart(false, physical_fd, start, size, is_copy, &etag_entity);
|
||||
upload_list.push_back(newpart);
|
||||
etagpair* petag_entity = etag_entities.add(etagpair(nullptr, partnumber)); // [NOTE] Create the etag entity and register it in the list.
|
||||
upload_list.emplace_back(false, physical_fd, start, size, is_copy, petag_entity);
|
||||
|
||||
// set etag pointer
|
||||
if(ppetag){
|
||||
*ppetag = &etag_entity;
|
||||
*ppetag = petag_entity;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void PseudoFdInfo::ClearUntreated(bool lock_already_held)
|
||||
//
|
||||
// Utility for sorting upload list
|
||||
//
|
||||
static bool filepart_partnum_compare(const filepart& src1, const filepart& src2)
|
||||
{
|
||||
AutoLock auto_lock(&upload_list_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE);
|
||||
|
||||
untreated_list.ClearAll();
|
||||
return src1.get_part_number() < src2.get_part_number();
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::ClearUntreated(off_t start, off_t size)
|
||||
bool PseudoFdInfo::InsertUploadPart(off_t start, off_t size, int part_num, bool is_copy, etagpair** ppetag, AutoLock::Type type)
|
||||
{
|
||||
//S3FS_PRN_DBG("[start=%lld][size=%lld][part_num=%d][is_copy=%s]", static_cast<long long int>(start), static_cast<long long int>(size), part_num, (is_copy ? "true" : "false"));
|
||||
|
||||
if(!IsUploading()){
|
||||
S3FS_PRN_ERR("Multipart Upload has not started yet.");
|
||||
return false;
|
||||
}
|
||||
if(start < 0 || size <= 0 || part_num < 0 || !ppetag){
|
||||
S3FS_PRN_ERR("Parameters are wrong.");
|
||||
return false;
|
||||
}
|
||||
|
||||
AutoLock auto_lock(&upload_list_lock, type);
|
||||
|
||||
// insert new part
|
||||
etagpair* petag_entity = etag_entities.add(etagpair(nullptr, part_num));
|
||||
upload_list.emplace_back(false, physical_fd, start, size, is_copy, petag_entity);
|
||||
|
||||
// sort by part number
|
||||
std::sort(upload_list.begin(), upload_list.end(), filepart_partnum_compare);
|
||||
|
||||
// set etag pointer
|
||||
*ppetag = petag_entity;
|
||||
|
||||
return true;
|
||||
}
|
||||
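InsertUploadPart appends the new part and then re-sorts upload_list with std::sort and a part-number comparator, so the list stays ordered even when parts arrive out of order. A short standalone sketch of that approach; the part struct is illustrative.

// Minimal sketch (illustrative): keeping a vector of parts ordered by part
// number with std::sort and a small comparator.
#include <algorithm>
#include <cassert>
#include <vector>

struct part {
    long long start;
    long long size;
    int       part_num;
};

static bool part_num_compare(const part& lhs, const part& rhs)
{
    return lhs.part_num < rhs.part_num;
}

int main()
{
    std::vector<part> upload_list;
    upload_list.push_back(part{20, 10, 3});    // inserted out of order
    upload_list.push_back(part{ 0, 10, 1});
    upload_list.push_back(part{10, 10, 2});

    std::sort(upload_list.begin(), upload_list.end(), part_num_compare);

    assert(1 == upload_list.front().part_num);
    assert(3 == upload_list.back().part_num);
    return 0;
}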
|
||||
// [NOTE]
|
||||
// This method only launches the upload thread.
|
||||
// Check the maximum number of threads before calling.
|
||||
//
|
||||
bool PseudoFdInfo::ParallelMultipartUpload(const char* path, const mp_part_list_t& mplist, bool is_copy, AutoLock::Type type)
|
||||
{
|
||||
//S3FS_PRN_DBG("[path=%s][mplist(%zu)]", SAFESTRPTR(path), mplist.size());
|
||||
|
||||
AutoLock auto_lock(&upload_list_lock, type);
|
||||
|
||||
if(mplist.empty()){
|
||||
// nothing to do
|
||||
return true;
|
||||
}
|
||||
if(!OpenUploadFd(AutoLock::ALREADY_LOCKED)){
|
||||
return false;
|
||||
}
|
||||
|
||||
for(mp_part_list_t::const_iterator iter = mplist.begin(); iter != mplist.end(); ++iter){
|
||||
// Insert upload part
|
||||
etagpair* petag = nullptr;
|
||||
if(!InsertUploadPart(iter->start, iter->size, iter->part_num, is_copy, &petag, AutoLock::ALREADY_LOCKED)){
|
||||
S3FS_PRN_ERR("Failed to insert upload part(path=%s, start=%lld, size=%lld, part=%d, copy=%s) to mplist", SAFESTRPTR(path), static_cast<long long int>(iter->start), static_cast<long long int>(iter->size), iter->part_num, (is_copy ? "true" : "false"));
|
||||
return false;
|
||||
}
|
||||
|
||||
// make parameter for my thread
|
||||
pseudofdinfo_thparam* thargs = new pseudofdinfo_thparam;
|
||||
thargs->ppseudofdinfo = this;
|
||||
thargs->path = SAFESTRPTR(path);
|
||||
thargs->upload_id = upload_id;
|
||||
thargs->upload_fd = upload_fd;
|
||||
thargs->start = iter->start;
|
||||
thargs->size = iter->size;
|
||||
thargs->is_copy = is_copy;
|
||||
thargs->part_num = iter->part_num;
|
||||
thargs->petag = petag;
|
||||
|
||||
// make parameter for thread pool
|
||||
std::unique_ptr<thpoolman_param> ppoolparam(new thpoolman_param);
|
||||
ppoolparam->args = thargs;
|
||||
ppoolparam->psem = &uploaded_sem;
|
||||
ppoolparam->pfunc = PseudoFdInfo::MultipartUploadThreadWorker;
|
||||
|
||||
// setup instruction
|
||||
if(!ThreadPoolMan::Instruct(std::move(ppoolparam))){
|
||||
S3FS_PRN_ERR("failed setup instruction for uploading.");
|
||||
delete thargs;
|
||||
return false;
|
||||
}
|
||||
++instruct_count;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::ParallelMultipartUploadAll(const char* path, const mp_part_list_t& to_upload_list, const mp_part_list_t& copy_list, int& result)
|
||||
{
|
||||
S3FS_PRN_DBG("[path=%s][to_upload_list(%zu)][copy_list(%zu)]", SAFESTRPTR(path), to_upload_list.size(), copy_list.size());
|
||||
|
||||
result = 0;
|
||||
|
||||
if(!OpenUploadFd(AutoLock::NONE)){
|
||||
return false;
|
||||
}
|
||||
|
||||
if(!ParallelMultipartUpload(path, to_upload_list, false, AutoLock::NONE) || !ParallelMultipartUpload(path, copy_list, true, AutoLock::NONE)){
|
||||
S3FS_PRN_ERR("Failed setup instruction for uploading(path=%s, to_upload_list=%zu, copy_list=%zu).", SAFESTRPTR(path), to_upload_list.size(), copy_list.size());
|
||||
return false;
|
||||
}
|
||||
|
||||
// Wait for all thread exiting
|
||||
result = WaitAllThreadsExit();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
// Upload the last updated Untreated area
|
||||
//
|
||||
// [Overview]
|
||||
// Uploads untreated areas with the maximum multipart upload size as the
|
||||
// boundary.
|
||||
//
|
||||
// * The starting position of the untreated area is aligned with the maximum
|
||||
// multipart upload size as the boundary.
|
||||
// * If there is an uploaded area that overlaps with the aligned untreated
|
||||
// area, that uploaded area is canceled and absorbed by the untreated area.
|
||||
// * Upload only when the aligned untreated area exceeds the maximum multipart
|
||||
// upload size.
|
||||
// * When the start position of the untreated area is moved to the boundary
// alignment and a gap area remains in front of the boundary, that gap area
// is returned to the untreated area.
|
||||
//
|
||||
ssize_t PseudoFdInfo::UploadBoundaryLastUntreatedArea(const char* path, headers_t& meta, FdEntity* pfdent)
|
||||
{
|
||||
S3FS_PRN_DBG("[path=%s][pseudo_fd=%d][physical_fd=%d]", SAFESTRPTR(path), pseudo_fd, physical_fd);
|
||||
|
||||
if(!path || -1 == physical_fd || -1 == pseudo_fd || !pfdent){
|
||||
S3FS_PRN_ERR("pseudo_fd(%d) to physical_fd(%d) for path(%s) is not opened or not writable, or pfdent is nullptr.", pseudo_fd, physical_fd, path);
|
||||
return -EBADF;
|
||||
}
|
||||
AutoLock auto_lock(&upload_list_lock);
|
||||
|
||||
//
|
||||
// Get last update untreated area
|
||||
//
|
||||
off_t last_untreated_start = 0;
|
||||
off_t last_untreated_size = 0;
|
||||
if(!pfdent->GetLastUpdateUntreatedPart(last_untreated_start, last_untreated_size) || last_untreated_start < 0 || last_untreated_size <= 0){
|
||||
S3FS_PRN_WARN("Not found last update untreated area or it is empty, thus return without any error.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
//
|
||||
// Aligns the start position of the last updated raw area with the boundary
|
||||
//
|
||||
// * Align the last updated raw space with the maximum upload size boundary.
|
||||
// * The remaining part before the boundary will not be uploaded.
|
||||
//
|
||||
off_t max_mp_size = S3fsCurl::GetMultipartSize();
|
||||
off_t aligned_start = ((last_untreated_start / max_mp_size) + (0 < (last_untreated_start % max_mp_size) ? 1 : 0)) * max_mp_size;
|
||||
if((last_untreated_start + last_untreated_size) <= aligned_start){
|
||||
S3FS_PRN_INFO("After the untreated area(start=%lld, size=%lld) is aligned with the boundary, the aligned start(%lld) exceeds the untreated area, so there is nothing to do.", static_cast<long long int>(last_untreated_start), static_cast<long long int>(last_untreated_size), static_cast<long long int>(aligned_start));
|
||||
return 0;
|
||||
}
|
||||
|
||||
off_t aligned_size = (((last_untreated_start + last_untreated_size) - aligned_start) / max_mp_size) * max_mp_size;
|
||||
if(0 == aligned_size){
|
||||
S3FS_PRN_DBG("After the untreated area(start=%lld, size=%lld) is aligned with the boundary(start is %lld), the aligned size is empty, so nothing to do.", static_cast<long long int>(last_untreated_start), static_cast<long long int>(last_untreated_size), static_cast<long long int>(aligned_start));
|
||||
return 0;
|
||||
}
|
||||
|
||||
off_t front_rem_start = last_untreated_start; // start of the remainder untreated area in front of the boundary
|
||||
off_t front_rem_size = aligned_start - last_untreated_start; // size of the remainder untreated area in front of the boundary
|
||||
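The aligned_start expression rounds the start of the untreated area up to the next multiple of the maximum multipart size, and the gap left in front of that boundary is kept as untreated data. A tiny worked sketch of the same arithmetic; align_up is a hypothetical helper.

// Minimal sketch (illustrative): round an offset up to the next multipart
// boundary and measure the gap that stays in front of it.
#include <cassert>
#include <cstdint>

int64_t align_up(int64_t start, int64_t max_mp_size)
{
    return ((start / max_mp_size) + (0 < (start % max_mp_size) ? 1 : 0)) * max_mp_size;
}

int main()
{
    const int64_t MiB         = 1024 * 1024;
    const int64_t max_mp_size = 10 * MiB;

    // An untreated area starting at 25MiB is aligned to the 30MiB boundary,
    // leaving 5MiB in front of the boundary as the untreated remainder.
    int64_t last_untreated_start = 25 * MiB;
    int64_t aligned_start        = align_up(last_untreated_start, max_mp_size);
    int64_t front_rem_size       = aligned_start - last_untreated_start;

    assert(30 * MiB == aligned_start);
    assert( 5 * MiB == front_rem_size);

    // A start that already sits on the boundary is not moved.
    assert(20 * MiB == align_up(20 * MiB, max_mp_size));
    return 0;
}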
|
||||
//
|
||||
// Get the area for uploading, if last update treated area can be uploaded.
|
||||
//
|
||||
// [NOTE]
|
||||
// * Create the upload area list, if the untreated area aligned with the boundary
|
||||
// exceeds the maximum upload size.
|
||||
// * If it overlaps with an area that has already been uploaded(upload list),
|
||||
// that area is added to the cancellation list and included in the untreated area.
|
||||
//
|
||||
mp_part_list_t to_upload_list;
|
||||
filepart_list_t cancel_uploaded_list;
|
||||
if(!ExtractUploadPartsFromUntreatedArea(aligned_start, aligned_size, to_upload_list, cancel_uploaded_list, S3fsCurl::GetMultipartSize())){
|
||||
S3FS_PRN_ERR("Failed to extract upload parts from last untreated area.");
|
||||
return -EIO;
|
||||
}
|
||||
if(to_upload_list.empty()){
|
||||
S3FS_PRN_INFO("There is nothing to upload. In most cases, the untreated area does not meet the upload size.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
//
|
||||
// Has multipart uploading already started?
|
||||
//
|
||||
if(!IsUploading()){
|
||||
// Multipart uploading hasn't started yet, so start it.
|
||||
//
|
||||
S3fsCurl s3fscurl(true);
|
||||
std::string tmp_upload_id;
|
||||
int result;
|
||||
if(0 != (result = s3fscurl.PreMultipartPostRequest(path, meta, tmp_upload_id, true))){
|
||||
S3FS_PRN_ERR("failed to setup multipart upload(create upload id) by errno(%d)", result);
|
||||
return result;
|
||||
}
|
||||
if(!RowInitialUploadInfo(tmp_upload_id, false/* not need to cancel */, AutoLock::ALREADY_LOCKED)){
|
||||
S3FS_PRN_ERR("failed to setup multipart upload(set upload id to object)");
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Output debug level information
|
||||
//
|
||||
// When canceling(overwriting) a part that has already been uploaded, output it.
|
||||
//
|
||||
if(S3fsLog::IsS3fsLogDbg()){
|
||||
for(filepart_list_t::const_iterator cancel_iter = cancel_uploaded_list.begin(); cancel_iter != cancel_uploaded_list.end(); ++cancel_iter){
|
||||
S3FS_PRN_DBG("Cancel uploaded: start(%lld), size(%lld), part number(%d)", static_cast<long long int>(cancel_iter->startpos), static_cast<long long int>(cancel_iter->size), (cancel_iter->petag ? cancel_iter->petag->part_num : -1));
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Upload Multipart parts
|
||||
//
|
||||
if(!ParallelMultipartUpload(path, to_upload_list, false, AutoLock::ALREADY_LOCKED)){
|
||||
S3FS_PRN_ERR("Failed to upload multipart parts.");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
//
|
||||
// Exclude the uploaded Untreated area and update the last Untreated area.
|
||||
//
|
||||
off_t behind_rem_start = aligned_start + aligned_size;
|
||||
off_t behind_rem_size = (last_untreated_start + last_untreated_size) - behind_rem_start;
|
||||
|
||||
if(!pfdent->ReplaceLastUpdateUntreatedPart(front_rem_start, front_rem_size, behind_rem_start, behind_rem_size)){
|
||||
S3FS_PRN_WARN("The last untreated area could not be detected and the uploaded area could not be excluded from it, but continue because it does not affect the overall processing.");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int PseudoFdInfo::WaitAllThreadsExit()
|
||||
{
|
||||
int result;
|
||||
bool is_loop = true;
|
||||
{
|
||||
AutoLock auto_lock(&upload_list_lock);
|
||||
if(0 == instruct_count && 0 == completed_count){
|
||||
result = last_result;
|
||||
is_loop = false;
|
||||
}
|
||||
}
|
||||
|
||||
while(is_loop){
|
||||
// need to wait the worker exiting
|
||||
uploaded_sem.wait();
|
||||
{
|
||||
AutoLock auto_lock(&upload_list_lock);
|
||||
if(0 < completed_count){
|
||||
--completed_count;
|
||||
}
|
||||
if(0 == instruct_count && 0 == completed_count){
|
||||
// break loop
|
||||
result = last_result;
|
||||
is_loop = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
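WaitAllThreadsExit keeps waiting until both instruct_count and completed_count reach zero, consuming one semaphore post per finished worker under the list lock. A rough standalone sketch of that bookkeeping expressed with standard primitives, where a condition_variable stands in for the semaphore; upload_state and the two functions are hypothetical.

// Minimal sketch (illustrative): wait until every instructed worker has
// reported completion, tracking counters under one lock.
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

struct upload_state {
    std::mutex              lock;
    std::condition_variable cond;
    int                     instruct_count  = 0;
    int                     completed_count = 0;
    int                     last_result     = 0;
};

void complete_instruction(upload_state& st, int result)
{
    std::lock_guard<std::mutex> guard(st.lock);
    if(0 != result){
        st.last_result = result;    // remember the failure
    }
    --st.instruct_count;
    ++st.completed_count;
    st.cond.notify_one();
}

int wait_all_threads_exit(upload_state& st)
{
    std::unique_lock<std::mutex> guard(st.lock);
    st.cond.wait(guard, [&st]() { return 0 == st.instruct_count; });
    st.completed_count = 0;         // every instructed worker has finished
    return st.last_result;
}

int main()
{
    upload_state st;
    {
        std::lock_guard<std::mutex> guard(st.lock);
        st.instruct_count = 4;      // four uploads were instructed
    }
    std::vector<std::thread> workers;
    for(int i = 0; i < 4; ++i){
        workers.emplace_back([&st]() { complete_instruction(st, 0); });
    }
    int result = wait_all_threads_exit(st);
    for(std::vector<std::thread>::iterator it = workers.begin(); it != workers.end(); ++it){
        it->join();
    }
    return result;
}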
|
||||
bool PseudoFdInfo::CancelAllThreads()
|
||||
{
|
||||
bool need_cancel = false;
|
||||
{
|
||||
AutoLock auto_lock(&upload_list_lock);
|
||||
if(0 < instruct_count && 0 < completed_count){
|
||||
S3FS_PRN_INFO("The upload thread is running, so cancel them and wait for the end.");
|
||||
need_cancel = true;
|
||||
last_result = -ECANCELED; // to stop thread running
|
||||
}
|
||||
}
|
||||
if(need_cancel){
|
||||
WaitAllThreadsExit();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
// Extract the list for multipart upload from the Untreated Area
|
||||
//
|
||||
// The untreated_start parameter must be set aligned with the boundaries
// of the maximum multipart upload size. This method expects it to be aligned.
|
||||
//
|
||||
// This method creates the upload area aligned from the untreated area by
|
||||
// maximum size and creates the required list.
|
||||
// If it overlaps with an area that has already been uploaded, the overlapped
|
||||
// upload area will be canceled and absorbed by the untreated area.
|
||||
// If the list creation process is complete and areas smaller than the maximum
|
||||
// size remain, those areas will be reset to untreated_start and untreated_size
|
||||
// and returned to the caller.
|
||||
// If the called untreated area is smaller than the maximum size of the
|
||||
// multipart upload, no list will be created.
|
||||
//
|
||||
// [NOTE]
|
||||
// Maximum multipart upload size must be uploading boundary.
|
||||
//
|
||||
bool PseudoFdInfo::ExtractUploadPartsFromUntreatedArea(const off_t& untreated_start, const off_t& untreated_size, mp_part_list_t& to_upload_list, filepart_list_t& cancel_upload_list, off_t max_mp_size)
|
||||
{
|
||||
if(untreated_start < 0 || untreated_size <= 0){
|
||||
S3FS_PRN_ERR("Parameters are wrong(untreated_start=%lld, untreated_size=%lld).", static_cast<long long int>(untreated_start), static_cast<long long int>(untreated_size));
|
||||
return false;
|
||||
}
|
||||
|
||||
// Initialize lists
|
||||
to_upload_list.clear();
|
||||
cancel_upload_list.clear();
|
||||
|
||||
//
|
||||
// Align start position with maximum multipart upload boundaries
|
||||
//
|
||||
off_t aligned_start = (untreated_start / max_mp_size) * max_mp_size;
|
||||
off_t aligned_size = untreated_size + (untreated_start - aligned_start);
|
||||
|
||||
//
|
||||
// Check aligned untreated size
|
||||
//
|
||||
if(aligned_size < max_mp_size){
|
||||
S3FS_PRN_INFO("untreated area(start=%lld, size=%lld) to aligned boundary(start=%lld, size=%lld) is smaller than max mp size(%lld), so nothing to do.", static_cast<long long int>(untreated_start), static_cast<long long int>(untreated_size), static_cast<long long int>(aligned_start), static_cast<long long int>(aligned_size), static_cast<long long int>(max_mp_size));
|
||||
return true; // successful termination
|
||||
}
|
||||
|
||||
//
|
||||
// Check each uploaded area in the list
//
// [NOTE]
// The uploaded area must be aligned to the boundary.
// Also, it is assumed that it must not be a copy area.
// So if the areas overlap, include the uploaded area in the untreated area.
|
||||
//
|
||||
for(filepart_list_t::iterator cur_iter = upload_list.begin(); cur_iter != upload_list.end(); /* ++cur_iter */){
|
||||
// Check overlap
|
||||
if((cur_iter->startpos + cur_iter->size - 1) < aligned_start || (aligned_start + aligned_size - 1) < cur_iter->startpos){
|
||||
// Areas do not overlap
|
||||
++cur_iter;
|
||||
|
||||
}else{
|
||||
// The areas overlap
|
||||
//
|
||||
// Since the start position of the uploaded area is aligned with the boundary,
|
||||
// it is not necessary to check the start position.
|
||||
// If the uploaded area exceeds the untreated area, expand the untreated area.
|
||||
//
|
||||
if((aligned_start + aligned_size - 1) < (cur_iter->startpos + cur_iter->size - 1)){
|
||||
aligned_size += (cur_iter->startpos + cur_iter->size) - (aligned_start + aligned_size);
|
||||
}
|
||||
|
||||
//
|
||||
// Add this to cancel list
|
||||
//
|
||||
cancel_upload_list.push_back(*cur_iter); // Copy and Push to cancel list
|
||||
cur_iter = upload_list.erase(cur_iter);
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Add upload area to the list
|
||||
//
|
||||
while(max_mp_size <= aligned_size){
|
||||
int part_num = static_cast<int>((aligned_start / max_mp_size) + 1);
|
||||
to_upload_list.emplace_back(aligned_start, max_mp_size, part_num);
|
||||
|
||||
aligned_start += max_mp_size;
|
||||
aligned_size -= max_mp_size;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
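As a rough illustration of the boundary arithmetic used above, the following standalone sketch (hypothetical example values, not taken from the s3fs sources) shows how an untreated region is widened back to the part boundary and then split into 1-origin numbered parts, leaving any remainder smaller than the part size untouched:

```cpp
// Minimal sketch of the multipart boundary alignment (hypothetical values).
#include <cstdint>
#include <cstdio>
#include <vector>

struct Part { int64_t start; int64_t size; int num; };

int main()
{
    const int64_t max_mp_size     = 10 * 1024 * 1024;   // assumed maximum part size
    const int64_t untreated_start = 25 * 1024 * 1024;   // example untreated region
    const int64_t untreated_size  = 30 * 1024 * 1024;

    // Widen the region so that it starts on a part boundary.
    int64_t aligned_start = (untreated_start / max_mp_size) * max_mp_size;
    int64_t aligned_size  = untreated_size + (untreated_start - aligned_start);

    // Cut off full-size parts; anything smaller than max_mp_size is left over.
    std::vector<Part> to_upload;
    while(max_mp_size <= aligned_size){
        int part_num = static_cast<int>(aligned_start / max_mp_size) + 1;   // 1-origin part number
        to_upload.push_back(Part{aligned_start, max_mp_size, part_num});
        aligned_start += max_mp_size;
        aligned_size  -= max_mp_size;
    }
    for(const Part& p : to_upload){
        std::printf("part %d: start=%lld size=%lld\n", p.num, static_cast<long long>(p.start), static_cast<long long>(p.size));
    }
    return 0;
}
```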
//
// Extract the area lists to be uploaded/downloaded for the entire file.
//
// [Parameters]
// to_upload_list       : A list of areas to upload in multipart upload.
// to_copy_list         : A list of areas for copy upload in multipart upload.
// to_download_list     : A list of areas that must be downloaded before multipart upload.
// cancel_upload_list   : A list of areas that have already been uploaded and will be canceled (overwritten).
// wait_upload_complete : If cancellation areas exist, this flag is set to true when it is necessary to wait until the upload of those cancellation areas is complete.
// file_size            : The size of the upload file.
// use_copy             : Specify true if copy multipart upload is available.
//
// [NOTE]
// The untreated_list in FdEntity does not change, but upload_list is changed.
// (If you want to restore it, you can use cancel_upload_list.)
//
bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, mp_part_list_t& to_upload_list, mp_part_list_t& to_copy_list, mp_part_list_t& to_download_list, filepart_list_t& cancel_upload_list, bool& wait_upload_complete, off_t max_mp_size, off_t file_size, bool use_copy)
{
    AutoLock auto_lock(&upload_list_lock);

    // Initialize lists
    to_upload_list.clear();
    to_copy_list.clear();
    to_download_list.clear();
    cancel_upload_list.clear();
    wait_upload_complete = false;

    // Duplicate untreated list
    untreated_list_t dup_untreated_list;
    untreated_list.Duplicate(dup_untreated_list);

    // Initialize the iterator of each list first
    untreated_list_t::iterator dup_untreated_iter = dup_untreated_list.begin();
    filepart_list_t::iterator  uploaded_iter      = upload_list.begin();

    //
    // Loop to extract areas to upload and download
    //
    // Check at the boundary of the maximum upload size from the beginning of the file
    //
    for(off_t cur_start = 0, cur_size = 0; cur_start < file_size; cur_start += cur_size){
        //
        // Set part size
        // (To avoid confusion, the area to be checked is called the "current area".)
        //
        cur_size = ((cur_start + max_mp_size) <= file_size ? max_mp_size : (file_size - cur_start));

        //
        // Extract the untreated area that overlaps this current area.
        // (The extracted area is deleted from dup_untreated_list.)
        //
        untreated_list_t cur_untreated_list;
        for(cur_untreated_list.clear(); dup_untreated_iter != dup_untreated_list.end(); ){
            if((dup_untreated_iter->start < (cur_start + cur_size)) && (cur_start < (dup_untreated_iter->start + dup_untreated_iter->size))){
                // this untreated area overlaps
                off_t tmp_untreated_start;
                off_t tmp_untreated_size;

                if(dup_untreated_iter->start < cur_start){
                    // [NOTE]
                    // This untreated area overlaps with the current area, but starts
                    // in front of the target area.
                    // This state should not be possible, but if this state is detected,
                    // the part before the target area will be deleted.
                    //
                    tmp_untreated_start = cur_start;
                    tmp_untreated_size  = dup_untreated_iter->size - (cur_start - dup_untreated_iter->start);
                }else{
                    tmp_untreated_start = dup_untreated_iter->start;
                    tmp_untreated_size  = dup_untreated_iter->size;
                }

                //
                // Check the end of the overlapping untreated area.
                //
                if((tmp_untreated_start + tmp_untreated_size) <= (cur_start + cur_size)){
                    //
                    // All of the untreated area is within the current area
                    //
                    // - Add this untreated area to cur_untreated_list
                    // - Delete this from dup_untreated_list
                    //
                    cur_untreated_list.emplace_back(tmp_untreated_start, tmp_untreated_size);
                    dup_untreated_iter = dup_untreated_list.erase(dup_untreated_iter);
                }else{
                    //
                    // The untreated area exceeds the end of the current area
                    //

                    // Adjust untreated area
                    tmp_untreated_size = (cur_start + cur_size) - tmp_untreated_start;

                    // Add adjusted untreated area to cur_untreated_list
                    cur_untreated_list.emplace_back(tmp_untreated_start, tmp_untreated_size);

                    // Remove this adjusted untreated area from the area pointed
                    // to by dup_untreated_iter.
                    dup_untreated_iter->size  = (dup_untreated_iter->start + dup_untreated_iter->size) - (cur_start + cur_size);
                    dup_untreated_iter->start = tmp_untreated_start + tmp_untreated_size;
                }

            }else if((cur_start + cur_size - 1) < dup_untreated_iter->start){
                // this untreated area is over the current area, thus break loop.
                break;
            }else{
                ++dup_untreated_iter;
            }
        }

        //
        // Check uploaded area
        //
        // [NOTE]
        // The uploaded area should be aligned with the maximum upload size boundary.
        // It also assumes that each size of uploaded area must be a maximum upload
        // size.
        //
        filepart_list_t::iterator overlap_uploaded_iter = upload_list.end();
        for(; uploaded_iter != upload_list.end(); ++uploaded_iter){
            if((cur_start < (uploaded_iter->startpos + uploaded_iter->size)) && (uploaded_iter->startpos < (cur_start + cur_size))){
                if(overlap_uploaded_iter != upload_list.end()){
                    //
                    // Something is wrong in this uploaded area.
                    //
                    // This area is not aligned with the boundary, then this condition
                    // is unrecoverable and return failure.
                    //
                    S3FS_PRN_ERR("The uploaded list may not be the boundary for the maximum multipart upload size. No further processing is possible.");
                    return false;
                }
                // Set this iterator to overlap iter
                overlap_uploaded_iter = uploaded_iter;

            }else if((cur_start + cur_size - 1) < uploaded_iter->startpos){
                break;
            }
        }

        //
        // Create upload/download/cancel/copy list for this current area
        //
        int part_num = static_cast<int>((cur_start / max_mp_size) + 1);
        if(cur_untreated_list.empty()){
            //
            // No untreated area was detected in this current area
            //
            if(overlap_uploaded_iter != upload_list.end()){
                //
                // This current area is already uploaded, then nothing to add to lists.
                //
                S3FS_PRN_DBG("Already uploaded: start=%lld, size=%lld", static_cast<long long int>(cur_start), static_cast<long long int>(cur_size));

            }else{
                //
                // This current area has not been uploaded
                // (neither an uploaded area nor an untreated area.)
                //
                if(use_copy){
                    //
                    // Copy multipart upload available
                    //
                    S3FS_PRN_DBG("To copy: start=%lld, size=%lld", static_cast<long long int>(cur_start), static_cast<long long int>(cur_size));
                    to_copy_list.emplace_back(cur_start, cur_size, part_num);
                }else{
                    //
                    // This current area needs to be downloaded and uploaded
                    //
                    S3FS_PRN_DBG("To download and upload: start=%lld, size=%lld", static_cast<long long int>(cur_start), static_cast<long long int>(cur_size));
                    to_download_list.emplace_back(cur_start, cur_size);
                    to_upload_list.emplace_back(cur_start, cur_size, part_num);
                }
            }
        }else{
            //
            // Found untreated area in this current area
            //
            if(overlap_uploaded_iter != upload_list.end()){
                //
                // This current area is also the uploaded area
                //
                // [NOTE]
                // The uploaded area is aligned with the boundary, and all data in
                // this current area exists locally (which includes all data of the
                // untreated area). So this current area only needs to be uploaded again.
                //
                S3FS_PRN_DBG("Cancel upload: start=%lld, size=%lld", static_cast<long long int>(overlap_uploaded_iter->startpos), static_cast<long long int>(overlap_uploaded_iter->size));

                if(!overlap_uploaded_iter->uploaded){
                    S3FS_PRN_DBG("This cancel upload area is still uploading, so you must wait for it to complete before starting any Stream uploads.");
                    wait_upload_complete = true;
                }
                cancel_upload_list.push_back(*overlap_uploaded_iter);       // add this uploaded area to cancel_upload_list
                uploaded_iter = upload_list.erase(overlap_uploaded_iter);   // remove it from upload_list

                S3FS_PRN_DBG("To upload: start=%lld, size=%lld", static_cast<long long int>(cur_start), static_cast<long long int>(cur_size));
                to_upload_list.emplace_back(cur_start, cur_size, part_num); // add new uploading area to list

            }else{
                //
                // No uploaded area overlaps this current area
                // (Areas other than the untreated area must be downloaded.)
                //
                // [NOTE]
                // Need to consider the case where there is a gap between the start
                // of the current area and the untreated area.
                // This gap is the area that should normally be downloaded.
                // But it is the area that can be copied if we can use copy multipart
                // upload. Then if we can use copy multipart upload and the previous
                // area is used copy multipart upload, this gap will be absorbed by
                // the previous area.
                // Unifying the copy multipart upload area can reduce the number of
                // upload requests.
                //
                off_t tmp_cur_start = cur_start;
                off_t tmp_cur_size  = cur_size;
                off_t changed_start = cur_start;
                off_t changed_size  = cur_size;
                bool  first_area    = true;
                for(untreated_list_t::const_iterator tmp_cur_untreated_iter = cur_untreated_list.begin(); tmp_cur_untreated_iter != cur_untreated_list.end(); ++tmp_cur_untreated_iter, first_area = false){
                    if(tmp_cur_start < tmp_cur_untreated_iter->start){
                        //
                        // Detected a gap at the start of area
                        //
                        bool include_prev_copy_part = false;

                        if(first_area && use_copy && !to_copy_list.empty()){
                            //
                            // Make sure that the area of the last item in to_copy_list
                            // is contiguous with this current area.
                            //
                            // [NOTE]
                            // Areas can be unified if the total size of the areas is
                            // within 5GB and the remaining area after unification is
                            // larger than the minimum multipart upload size.
                            //
                            mp_part_list_t::reverse_iterator copy_riter = to_copy_list.rbegin();

                            if( (copy_riter->start + copy_riter->size) == tmp_cur_start &&
                                (copy_riter->size + (tmp_cur_untreated_iter->start - tmp_cur_start)) <= FIVE_GB &&
                                ((tmp_cur_start + tmp_cur_size) - tmp_cur_untreated_iter->start) >= MIN_MULTIPART_SIZE )
                            {
                                //
                                // Unify this area into the previous copy area.
                                //
                                copy_riter->size += tmp_cur_untreated_iter->start - tmp_cur_start;
                                S3FS_PRN_DBG("Resize to copy: start=%lld, size=%lld", static_cast<long long int>(copy_riter->start), static_cast<long long int>(copy_riter->size));

                                changed_size -= (tmp_cur_untreated_iter->start - changed_start);
                                changed_start = tmp_cur_untreated_iter->start;
                                include_prev_copy_part = true;
                            }
                        }
                        if(!include_prev_copy_part){
                            //
                            // If this area is not unified, need to download this area
                            //
                            S3FS_PRN_DBG("To download: start=%lld, size=%lld", static_cast<long long int>(tmp_cur_start), static_cast<long long int>(tmp_cur_untreated_iter->start - tmp_cur_start));
                            to_download_list.emplace_back(tmp_cur_start, tmp_cur_untreated_iter->start - tmp_cur_start);
                        }
                    }
                    //
                    // Set next start position
                    //
                    tmp_cur_size  = (tmp_cur_start + tmp_cur_size) - (tmp_cur_untreated_iter->start + tmp_cur_untreated_iter->size);
                    tmp_cur_start = tmp_cur_untreated_iter->start + tmp_cur_untreated_iter->size;
                }

                //
                // Add download area to list, if remaining size
                //
                if(0 < tmp_cur_size){
                    S3FS_PRN_DBG("To download: start=%lld, size=%lld", static_cast<long long int>(tmp_cur_start), static_cast<long long int>(tmp_cur_size));
                    to_download_list.emplace_back(tmp_cur_start, tmp_cur_size);
                }

                //
                // Set upload area(whole of area) to list
                //
                S3FS_PRN_DBG("To upload: start=%lld, size=%lld", static_cast<long long int>(changed_start), static_cast<long long int>(changed_size));
                to_upload_list.emplace_back(changed_start, changed_size, part_num);
            }
        }
    }
    return true;
}

bool PseudoFdInfo::ClearUntreated(off_t start, off_t size)
{
    AutoLock auto_lock(&upload_list_lock);

    return untreated_list.ClearParts(start, size);
}

bool PseudoFdInfo::GetUntreated(off_t& start, off_t& size, off_t max_size, off_t min_size)
{
    AutoLock auto_lock(&upload_list_lock);

    return untreated_list.GetPart(start, size, max_size, min_size);
}

bool PseudoFdInfo::GetLastUntreated(off_t& start, off_t& size, off_t max_size, off_t min_size)
{
    AutoLock auto_lock(&upload_list_lock);

    return untreated_list.GetLastUpdatedPart(start, size, max_size, min_size);
}

bool PseudoFdInfo::AddUntreated(off_t start, off_t size)
{
    AutoLock auto_lock(&upload_list_lock);

    return untreated_list.AddPart(start, size);
}

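For orientation, the sketch below models only the core idea of the loop above: walk the file in chunks of the maximum part size and classify each chunk as "already uploaded", "copy", or "needs upload". The types and input values are hypothetical, and the real method additionally handles cancelled uploads, the gap-unification with the previous copy part, and locking.

```cpp
// Simplified, self-contained model of the chunk-by-chunk classification above.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <set>
#include <vector>

struct Range { int64_t start; int64_t size; };

static bool overlaps(const Range& r, int64_t start, int64_t size)
{
    return r.start < (start + size) && start < (r.start + r.size);
}

int main()
{
    const int64_t part  = 10;      // assumed max multipart size
    const int64_t fsize = 45;      // assumed file size
    const bool use_copy = true;

    std::vector<Range> untreated = { {12, 3}, {30, 15} };   // locally modified areas
    std::set<int64_t>  uploaded  = { 0 };                   // chunk starts already uploaded

    for(int64_t cur = 0; cur < fsize; cur += part){
        int64_t size  = std::min(part, fsize - cur);
        bool    dirty = false;
        for(const Range& r : untreated){
            if(overlaps(r, cur, size)){ dirty = true; break; }
        }
        if(dirty){
            std::printf("chunk %lld: upload (download any clean gap first)\n", static_cast<long long>(cur));
        }else if(uploaded.count(cur)){
            std::printf("chunk %lld: already uploaded, skip\n", static_cast<long long>(cur));
        }else{
            std::printf("chunk %lld: %s\n", static_cast<long long>(cur), use_copy ? "copy upload" : "download then upload");
        }
    }
    return 0;
}
```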
/*

@@ -21,7 +21,35 @@
#ifndef S3FS_FDCACHE_FDINFO_H_
#define S3FS_FDCACHE_FDINFO_H_

#include "fdcache_untreated.h"
#include <memory>

#include "psemaphore.h"
#include "metaheader.h"
#include "autolock.h"
#include "types.h"

class FdEntity;
class UntreatedParts;

//------------------------------------------------
// Structure of parameters to pass to thread
//------------------------------------------------
class PseudoFdInfo;

struct pseudofdinfo_thparam
{
PseudoFdInfo* ppseudofdinfo;
std::string path;
std::string upload_id;
int upload_fd;
off_t start;
off_t size;
bool is_copy;
int part_num;
etagpair* petag;

pseudofdinfo_thparam() : ppseudofdinfo(nullptr), path(""), upload_id(""), upload_fd(-1), start(0), size(0), is_copy(false), part_num(-1), petag(nullptr) {}
};

//------------------------------------------------
// Class PseudoFdInfo
@@ -29,23 +57,44 @@
class PseudoFdInfo
{
private:
int pseudo_fd;
int physical_fd;
int flags; // flags at open
std::string upload_id;
filepart_list_t upload_list;
UntreatedParts untreated_list; // list of untreated parts that have been written and not yet uploaded(for streamupload)
etaglist_t etag_entities; // list of etag string and part number entities(to maintain the etag entity even if MPPART_INFO is destroyed)
static int max_threads;
static int opt_max_threads; // for option value

bool is_lock_init;
pthread_mutex_t upload_list_lock; // protects upload_id and upload_list
int pseudo_fd;
int physical_fd;
int flags; // flags at open
std::string upload_id;
int upload_fd; // duplicated fd for uploading
filepart_list_t upload_list;
petagpool etag_entities; // list of etag string and part number entities(to maintain the etag entity even if MPPART_INFO is destroyed)
bool is_lock_init;
mutable pthread_mutex_t upload_list_lock; // protects upload_id and upload_list
Semaphore uploaded_sem; // use a semaphore to trigger an upload completion like event flag
int instruct_count; // number of instructions for processing by threads
int completed_count; // number of completed processes by thread
int last_result; // the result of thread processing

private:
static void* MultipartUploadThreadWorker(void* arg);

bool Clear();
void CloseUploadFd();
bool OpenUploadFd(AutoLock::Type type = AutoLock::NONE);
bool ResetUploadInfo(AutoLock::Type type);
bool RowInitialUploadInfo(const std::string& id, bool is_cancel_mp, AutoLock::Type type);
bool CompleteInstruction(int result, AutoLock::Type type = AutoLock::NONE);
bool ParallelMultipartUpload(const char* path, const mp_part_list_t& mplist, bool is_copy, AutoLock::Type type = AutoLock::NONE);
bool InsertUploadPart(off_t start, off_t size, int part_num, bool is_copy, etagpair** ppetag, AutoLock::Type type = AutoLock::NONE);
bool CancelAllThreads();
bool ExtractUploadPartsFromUntreatedArea(const off_t& untreated_start, const off_t& untreated_size, mp_part_list_t& to_upload_list, filepart_list_t& cancel_upload_list, off_t max_mp_size);

public:
PseudoFdInfo(int fd = -1, int open_flags = 0);
explicit PseudoFdInfo(int fd = -1, int open_flags = 0);
~PseudoFdInfo();
PseudoFdInfo(const PseudoFdInfo&) = delete;
PseudoFdInfo(PseudoFdInfo&&) = delete;
PseudoFdInfo& operator=(const PseudoFdInfo&) = delete;
PseudoFdInfo& operator=(PseudoFdInfo&&) = delete;

int GetPhysicalFd() const { return physical_fd; }
int GetPseudoFd() const { return pseudo_fd; }
@@ -54,23 +103,23 @@ class PseudoFdInfo
bool Readable() const;

bool Set(int fd, int open_flags);
bool ClearUploadInfo(bool is_clear_part = false, bool lock_already_held = false);
bool InitialUploadInfo(const std::string& id);
bool ClearUploadInfo(bool is_cancel_mp = false);
bool InitialUploadInfo(const std::string& id){ return RowInitialUploadInfo(id, true, AutoLock::NONE); }

bool IsUploading() const { return !upload_id.empty(); }
bool GetUploadId(std::string& id) const;
bool GetEtaglist(etaglist_t& list);
bool GetEtaglist(etaglist_t& list) const;

bool AppendUploadPart(off_t start, off_t size, bool is_copy = false, etagpair** ppetag = NULL);
bool AppendUploadPart(off_t start, off_t size, bool is_copy = false, etagpair** ppetag = nullptr);

void ClearUntreated(bool lock_already_held = false);
bool ClearUntreated(off_t start, off_t size);
bool GetUntreated(off_t& start, off_t& size, off_t max_size, off_t min_size = MIN_MULTIPART_SIZE);
bool GetLastUntreated(off_t& start, off_t& size, off_t max_size, off_t min_size = MIN_MULTIPART_SIZE);
bool AddUntreated(off_t start, off_t size);
bool ParallelMultipartUploadAll(const char* path, const mp_part_list_t& to_upload_list, const mp_part_list_t& copy_list, int& result);

int WaitAllThreadsExit();
ssize_t UploadBoundaryLastUntreatedArea(const char* path, headers_t& meta, FdEntity* pfdent);
bool ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, mp_part_list_t& to_upload_list, mp_part_list_t& to_copy_list, mp_part_list_t& to_download_list, filepart_list_t& cancel_upload_list, bool& wait_upload_complete, off_t max_mp_size, off_t file_size, bool use_copy);
};

typedef std::map<int, class PseudoFdInfo*> fdinfo_map_t;
typedef std::map<int, std::unique_ptr<PseudoFdInfo>> fdinfo_map_t;

#endif // S3FS_FDCACHE_FDINFO_H_

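The last change in this header swaps the raw-pointer map for `std::map<int, std::unique_ptr<PseudoFdInfo>>`. The sketch below (with a hypothetical `Info` stand-in type, not the real PseudoFdInfo) shows the practical effect: erasing an entry, or destroying the map, releases the object automatically, so no explicit delete is needed when a pseudo fd is closed.

```cpp
// Minimal sketch of ownership via a unique_ptr-valued map (hypothetical Info type).
#include <cstdio>
#include <map>
#include <memory>

struct Info {
    int fd;
    explicit Info(int f) : fd(f) { std::printf("open %d\n", fd); }
    ~Info() { std::printf("close %d\n", fd); }
};

typedef std::map<int, std::unique_ptr<Info>> info_map_t;

int main()
{
    info_map_t fdinfo;
    fdinfo.emplace(2, std::unique_ptr<Info>(new Info(2)));
    fdinfo.emplace(3, std::unique_ptr<Info>(new Info(3)));

    fdinfo.erase(2);    // ~Info runs here; with raw pointers this would leak without delete
    return 0;           // remaining entries are destroyed when the map goes out of scope
}
```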
@@ -19,31 +19,35 @@
*/

#include <cstdio>
#include <cstdlib>
#include <cerrno>
#include <memory>
#include <unistd.h>
#include <sstream>
#include <sys/stat.h>

#include "common.h"
#include "s3fs.h"
#include "s3fs_logger.h"
#include "fdcache_page.h"
#include "fdcache_stat.h"
#include "string_util.h"

//------------------------------------------------
// Symbols
//------------------------------------------------
static const int CHECK_CACHEFILE_PART_SIZE = 1024 * 16; // Buffer size in PageList::CheckZeroAreaInFile()
static constexpr int CHECK_CACHEFILE_PART_SIZE = 1024 * 16; // Buffer size in PageList::CheckZeroAreaInFile()

//------------------------------------------------
// fdpage_list_t utility
//------------------------------------------------
// Inline function for repeated processing
inline void raw_add_compress_fdpage_list(fdpage_list_t& pagelist, fdpage& page, bool ignore_load, bool ignore_modify, bool default_load, bool default_modify)
inline void raw_add_compress_fdpage_list(fdpage_list_t& pagelist, const fdpage& orgpage, bool ignore_load, bool ignore_modify, bool default_load, bool default_modify)
{
if(0 < page.bytes){
if(0 < orgpage.bytes){
// [NOTE]
// The page variable is subject to change here.
//
fdpage page = orgpage;

if(ignore_load){
page.loaded = default_load;
}
@@ -65,75 +69,71 @@ inline void raw_add_compress_fdpage_list(fdpage_list_t& pagelist, fdpage& page,
// Zero size pages will be deleted. However, if the page information is the only one,
// it will be left behind. This is what you need to do to create a new empty file.
//
static fdpage_list_t raw_compress_fdpage_list(const fdpage_list_t& pages, bool ignore_load, bool ignore_modify, bool default_load, bool default_modify)
static void raw_compress_fdpage_list(const fdpage_list_t& pages, fdpage_list_t& compressed_pages, bool ignore_load, bool ignore_modify, bool default_load, bool default_modify)
{
fdpage_list_t compressed_pages;
fdpage tmppage;
bool is_first = true;
compressed_pages.clear();

fdpage* lastpage = nullptr;
fdpage_list_t::iterator add_iter;
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
if(!is_first){
if(0 < tmppage.bytes){
if( (!ignore_load && (tmppage.loaded != iter->loaded )) ||
(!ignore_modify && (tmppage.modified != iter->modified)) )
if(0 == iter->bytes){
continue;
}
if(!lastpage){
// First item
raw_add_compress_fdpage_list(compressed_pages, (*iter), ignore_load, ignore_modify, default_load, default_modify);
lastpage = &(compressed_pages.back());
}else{
// check page continuity
if(lastpage->next() != iter->offset){
// Non-consecutive with last page, so add a page filled with default values
if( (!ignore_load && (lastpage->loaded != false)) ||
(!ignore_modify && (lastpage->modified != false)) )
{
// Different from the previous area, add it to list
// add new page
fdpage tmppage(lastpage->next(), (iter->offset - lastpage->next()), false, false);
raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify);

// keep current area
tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? default_modify : iter->modified));
add_iter = compressed_pages.end();
--add_iter;
lastpage = &(*add_iter);
}else{
// Same as the previous area
if(tmppage.next() != iter->offset){
// These are not contiguous areas, add it to list
raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify);

// keep current area
tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? default_modify : iter->modified));
}else{
// These are contiguous areas

// add current area
tmppage.bytes += iter->bytes;
}
// Expand last area
lastpage->bytes = iter->offset - lastpage->offset;
}
}else{
// if found empty page, skip it
tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? default_modify : iter->modified));
}
}else{
// first area
is_first = false;

// keep current area
tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? default_modify : iter->modified));
// add current page
if( (!ignore_load && (lastpage->loaded != iter->loaded )) ||
(!ignore_modify && (lastpage->modified != iter->modified)) )
{
// Add new page
raw_add_compress_fdpage_list(compressed_pages, (*iter), ignore_load, ignore_modify, default_load, default_modify);

add_iter = compressed_pages.end();
--add_iter;
lastpage = &(*add_iter);
}else{
// Expand last area
lastpage->bytes += iter->bytes;
}
}
}
// add last area
if(!is_first){
// [NOTE]
// Zero size pages are not allowed. However, if it is the only one, allow it.
// This is a special process that exists only to create empty files.
//
if(compressed_pages.empty() || 0 != tmppage.bytes){
raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify);
}
}
return compressed_pages;
}

static fdpage_list_t compress_fdpage_list_ignore_modify(const fdpage_list_t& pages, bool default_modify)
static void compress_fdpage_list_ignore_modify(const fdpage_list_t& pages, fdpage_list_t& compressed_pages, bool default_modify)
{
return raw_compress_fdpage_list(pages, /* ignore_load= */ false, /* ignore_modify= */ true, /* default_load= */false, /* default_modify= */default_modify);
raw_compress_fdpage_list(pages, compressed_pages, /* ignore_load= */ false, /* ignore_modify= */ true, /* default_load= */false, /* default_modify= */default_modify);
}

static fdpage_list_t compress_fdpage_list_ignore_load(const fdpage_list_t& pages, bool default_load)
static void compress_fdpage_list_ignore_load(const fdpage_list_t& pages, fdpage_list_t& compressed_pages, bool default_load)
{
return raw_compress_fdpage_list(pages, /* ignore_load= */ true, /* ignore_modify= */ false, /* default_load= */default_load, /* default_modify= */false);
raw_compress_fdpage_list(pages, compressed_pages, /* ignore_load= */ true, /* ignore_modify= */ false, /* default_load= */default_load, /* default_modify= */false);
}

static fdpage_list_t compress_fdpage_list(const fdpage_list_t& pages)
static void compress_fdpage_list(const fdpage_list_t& pages, fdpage_list_t& compressed_pages)
{
return raw_compress_fdpage_list(pages, /* ignore_load= */ false, /* ignore_modify= */ false, /* default_load= */false, /* default_modify= */false);
raw_compress_fdpage_list(pages, compressed_pages, /* ignore_load= */ false, /* ignore_modify= */ false, /* default_load= */false, /* default_modify= */false);
}

static fdpage_list_t parse_partsize_fdpage_list(const fdpage_list_t& pages, off_t max_partsize)
@@ -233,7 +233,7 @@ bool PageList::GetSparseFilePages(int fd, size_t file_size, fdpage_list_t& spars
//
bool PageList::CheckZeroAreaInFile(int fd, off_t start, size_t bytes)
{
char* readbuff = new char[CHECK_CACHEFILE_PART_SIZE];
std::unique_ptr<char[]> readbuff(new char[CHECK_CACHEFILE_PART_SIZE]);

for(size_t comp_bytes = 0, check_bytes = 0; comp_bytes < bytes; comp_bytes += check_bytes){
if(CHECK_CACHEFILE_PART_SIZE < (bytes - comp_bytes)){
@@ -243,7 +243,7 @@ bool PageList::CheckZeroAreaInFile(int fd, off_t start, size_t bytes)
}
bool found_bad_data = false;
ssize_t read_bytes;
if(-1 == (read_bytes = pread(fd, readbuff, check_bytes, (start + comp_bytes)))){
if(-1 == (read_bytes = pread(fd, readbuff.get(), check_bytes, (start + comp_bytes)))){
S3FS_PRN_ERR("An error occurred while reading %zu bytes at %lld from file(physical_fd=%d).", check_bytes, static_cast<long long int>(start + comp_bytes), fd);
found_bad_data = true;
}else{
@@ -257,11 +257,9 @@ bool PageList::CheckZeroAreaInFile(int fd, off_t start, size_t bytes)
}
}
if(found_bad_data){
delete[] readbuff;
return false;
}
}
delete[] readbuff;
return true;
}

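The hunk above replaces a manually managed buffer with `std::unique_ptr<char[]>`, so the early-return paths no longer need their own `delete[]`. The following hedged sketch (the file path is only an example) shows the same RAII pattern in isolation:

```cpp
// Minimal sketch of an RAII read buffer; freed on every return path.
#include <cstdio>
#include <fcntl.h>
#include <memory>
#include <unistd.h>

int main()
{
    const size_t bufsize = 16 * 1024;
    std::unique_ptr<char[]> buf(new char[bufsize]);    // no delete[] needed anywhere below

    int fd = open("/etc/hostname", O_RDONLY);          // example file, assumption
    if(-1 == fd){
        return 1;                                      // buffer released automatically
    }
    ssize_t bytes = pread(fd, buf.get(), bufsize, 0);  // raw pointer only at the call site
    if(0 < bytes){
        std::printf("read %zd bytes\n", bytes);
    }
    close(fd);
    return 0;
}
```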
@@ -360,14 +358,6 @@ PageList::PageList(off_t size, bool is_loaded, bool is_modified, bool shrinked)
Init(size, is_loaded, is_modified);
}

PageList::PageList(const PageList& other)
{
for(fdpage_list_t::const_iterator iter = other.pages.begin(); iter != other.pages.end(); ++iter){
pages.push_back(*iter);
}
is_shrink = other.is_shrink;
}

PageList::~PageList()
{
Clear();
@@ -400,7 +390,38 @@ off_t PageList::Size() const

bool PageList::Compress()
{
pages = compress_fdpage_list(pages);
fdpage* lastpage = nullptr;
for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ){
if(!lastpage){
// First item
lastpage = &(*iter);
++iter;
}else{
// check page continuity
if(lastpage->next() != iter->offset){
// Non-consecutive with last page, so add a page filled with default values
if(lastpage->loaded || lastpage->modified){
// insert new page before current pos
fdpage tmppage(lastpage->next(), (iter->offset - lastpage->next()), false, false);
iter = pages.insert(iter, tmppage);
lastpage = &(*iter);
++iter;
}else{
// Expand last area
lastpage->bytes = iter->offset - lastpage->offset;
}
}
// check current page
if(lastpage->loaded == iter->loaded && lastpage->modified == iter->modified){
// Expand last area and remove current pos
lastpage->bytes += iter->bytes;
iter = pages.erase(iter);
}else{
lastpage = &(*iter);
++iter;
}
}
}
return true;
}

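The core of the new in-place Compress() is merging adjacent pages that carry the same loaded/modified flags. The standalone sketch below (simplified, index-based, and omitting the gap-filling step that the real loop also performs) illustrates that merge rule:

```cpp
// Standalone sketch of merging contiguous entries with identical flags.
#include <cstdio>
#include <vector>

struct Page { long offset; long bytes; bool loaded; bool modified; };

int main()
{
    std::vector<Page> pages = {
        {0, 10, true, false}, {10, 5, true, false}, {15, 5, false, true}, {20, 10, false, true}
    };

    std::vector<Page> merged;
    for(const Page& p : pages){
        if(!merged.empty()
           && merged.back().offset + merged.back().bytes == p.offset   // contiguous
           && merged.back().loaded == p.loaded
           && merged.back().modified == p.modified){
            merged.back().bytes += p.bytes;                            // extend previous entry
        }else{
            merged.push_back(p);
        }
    }
    for(const Page& p : merged){
        std::printf("offset=%ld bytes=%ld loaded=%d modified=%d\n", p.offset, p.bytes, p.loaded, p.modified);
    }
    return 0;
}
```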
@@ -426,7 +447,13 @@ bool PageList::Resize(off_t size, bool is_loaded, bool is_modified)
off_t total = Size();

if(0 == total){
// [NOTE]
// The is_shrink flag remains unchanged in this function.
//
bool backup_is_shrink = is_shrink;

Init(size, is_loaded, is_modified);
is_shrink = backup_is_shrink;

}else if(total < size){
// add new area
@@ -475,8 +502,8 @@ bool PageList::IsPageLoaded(off_t start, off_t size) const
bool PageList::SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus, bool is_compress)
{
off_t now_size = Size();
bool is_loaded = (PAGE_LOAD_MODIFIED == pstatus || PAGE_LOADED == pstatus);
bool is_modified = (PAGE_LOAD_MODIFIED == pstatus || PAGE_MODIFIED == pstatus);
bool is_loaded = (page_status::LOAD_MODIFIED == pstatus || page_status::LOADED == pstatus);
bool is_modified = (page_status::LOAD_MODIFIED == pstatus || page_status::MODIFIED == pstatus);

if(now_size <= start){
if(now_size < start){
@@ -621,14 +648,14 @@ size_t PageList::GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start, off
bool PageList::GetPageListsForMultipartUpload(fdpage_list_t& dlpages, fdpage_list_t& mixuppages, off_t max_partsize)
{
// compress before this processing
if(!Compress()){
return false;
}
Compress(); // always true

// make a list by modified flag
fdpage_list_t modified_pages = compress_fdpage_list_ignore_load(pages, false);
fdpage_list_t modified_pages;
fdpage_list_t download_pages; // A non-contiguous page list showing the areas that need to be downloaded
fdpage_list_t mixupload_pages; // A continuous page list showing only modified flags for mixupload
compress_fdpage_list_ignore_load(pages, modified_pages, false);

fdpage prev_page;
for(fdpage_list_t::const_iterator iter = modified_pages.begin(); iter != modified_pages.end(); ++iter){
if(iter->modified){
@@ -709,8 +736,8 @@ bool PageList::GetPageListsForMultipartUpload(fdpage_list_t& dlpages, fdpage_lis
}

// compress
dlpages = compress_fdpage_list_ignore_modify(download_pages, false);
mixuppages = compress_fdpage_list_ignore_load(mixupload_pages, false);
compress_fdpage_list_ignore_modify(download_pages, dlpages, false);
compress_fdpage_list_ignore_load(mixupload_pages, mixuppages, false);

// parse by max pagesize
dlpages = parse_partsize_fdpage_list(dlpages, max_partsize);
@@ -722,9 +749,7 @@
bool PageList::GetNoDataPageLists(fdpage_list_t& nodata_pages, off_t start, size_t size)
{
// compress before this processing
if(!Compress()){
return false;
}
Compress(); // always true

// extract areas without data
fdpage_list_t tmp_pagelist;
@@ -742,7 +767,7 @@ bool PageList::GetNoDataPageLists(fdpage_list_t& nodata_pages, off_t start, size

fdpage tmppage;
tmppage.offset = std::max(iter->offset, start);
tmppage.bytes = (-1 != stop_pos ? iter->bytes : std::min(iter->bytes, (stop_pos - tmppage.offset)));
tmppage.bytes = (-1 == stop_pos ? iter->bytes : std::min(iter->bytes, (stop_pos - tmppage.offset)));
tmppage.loaded = iter->loaded;
tmppage.modified = iter->modified;

@@ -753,7 +778,7 @@ bool PageList::GetNoDataPageLists(fdpage_list_t& nodata_pages, off_t start, size
nodata_pages.clear();
}else{
// compress
nodata_pages = compress_fdpage_list(tmp_pagelist);
compress_fdpage_list(tmp_pagelist, nodata_pages);
}
return true;
}
@@ -835,17 +860,16 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
Init(0, false, false);
return true;
}
char* ptmp = new char[st.st_size + 1];
std::unique_ptr<char[]> ptmp(new char[st.st_size + 1]);
ssize_t result;
// read from file
if(0 >= (result = pread(file.GetFd(), ptmp, st.st_size, 0))){
if(0 >= (result = pread(file.GetFd(), ptmp.get(), st.st_size, 0))){
S3FS_PRN_ERR("failed to read stats(%d)", errno);
delete[] ptmp;
return false;
}
ptmp[result] = '\0';
std::string oneline;
std::istringstream ssall(ptmp);
std::istringstream ssall(ptmp.get());

// loaded
Clear();
@@ -855,7 +879,6 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
ino_t cache_inode; // if this value is 0, it means old format.
if(!getline(ssall, oneline, '\n')){
S3FS_PRN_ERR("failed to parse stats.");
delete[] ptmp;
return false;
}else{
std::istringstream sshead(oneline);
@@ -865,7 +888,6 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
// get first part in head line.
if(!getline(sshead, strhead1, ':')){
S3FS_PRN_ERR("failed to parse stats.");
delete[] ptmp;
return false;
}
// get second part in head line.
@@ -879,7 +901,6 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
cache_inode = static_cast<ino_t>(cvt_strtoofft(strhead1.c_str(), /* base= */10));
if(0 == cache_inode){
S3FS_PRN_ERR("wrong inode number in parsed cache stats.");
delete[] ptmp;
return false;
}
}
@@ -887,7 +908,6 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
// check inode number
if(0 != cache_inode && cache_inode != inode){
S3FS_PRN_ERR("differ inode and inode number in parsed cache stats.");
delete[] ptmp;
return false;
}

@@ -921,14 +941,20 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
is_modified = (1 == cvt_strtoofft(part.c_str(), /* base= */10) ? true : false);
}
// add new area
PageList::page_status pstatus =
( is_loaded && is_modified ? PageList::PAGE_LOAD_MODIFIED :
!is_loaded && is_modified ? PageList::PAGE_MODIFIED :
is_loaded && !is_modified ? PageList::PAGE_LOADED : PageList::PAGE_NOT_LOAD_MODIFIED );

PageList::page_status pstatus = PageList::page_status::NOT_LOAD_MODIFIED;
if(is_loaded){
if(is_modified){
pstatus = PageList::page_status::LOAD_MODIFIED;
}else{
pstatus = PageList::page_status::LOADED;
}
}else{
if(is_modified){
pstatus = PageList::page_status::MODIFIED;
}
}
SetPageLoadedStatus(offset, size, pstatus);
}
delete[] ptmp;
if(is_err){
S3FS_PRN_ERR("failed to parse stats.");
Clear();

@@ -21,10 +21,8 @@
#ifndef S3FS_FDCACHE_PAGE_H_
#define S3FS_FDCACHE_PAGE_H_

#include <list>
#include <sys/types.h>

#include "fdcache_stat.h"
#include <vector>

//------------------------------------------------
// Symbols
@@ -51,7 +49,7 @@ struct fdpage
bool loaded;
bool modified;

fdpage(off_t start = 0, off_t size = 0, bool is_loaded = false, bool is_modified = false) :
explicit fdpage(off_t start = 0, off_t size = 0, bool is_loaded = false, bool is_modified = false) :
offset(start), bytes(size), loaded(is_loaded), modified(is_modified) {}

off_t next() const
@@ -63,11 +61,12 @@ struct fdpage
return (0 < bytes ? offset + bytes - 1 : 0);
}
};
typedef std::list<struct fdpage> fdpage_list_t;
typedef std::vector<struct fdpage> fdpage_list_t;

//------------------------------------------------
// Class PageList
//------------------------------------------------
class CacheFileStat;
class FdEntity;

// cppcheck-suppress copyCtorAndEqOperator
@@ -80,11 +79,11 @@ class PageList
bool is_shrink; // [NOTE] true if it has been shrinked even once

public:
enum page_status{
PAGE_NOT_LOAD_MODIFIED = 0,
PAGE_LOADED,
PAGE_MODIFIED,
PAGE_LOAD_MODIFIED
enum class page_status{
NOT_LOAD_MODIFIED = 0,
LOADED,
MODIFIED,
LOAD_MODIFIED
};

private:
@@ -99,7 +98,8 @@ class PageList
static void FreeList(fdpage_list_t& list);

explicit PageList(off_t size = 0, bool is_loaded = false, bool is_modified = false, bool shrinked = false);
explicit PageList(const PageList& other);
PageList(const PageList&) = delete;
PageList& operator=(const PageList&) = delete;
~PageList();

bool Init(off_t size, bool is_loaded, bool is_modified);
@@ -107,7 +107,7 @@ class PageList
bool Resize(off_t size, bool is_loaded, bool is_modified);

bool IsPageLoaded(off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list
bool SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus = PAGE_LOADED, bool is_compress = true);
bool SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus = page_status::LOADED, bool is_compress = true);
bool FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const;
off_t GetTotalUnloadedPageSize(off_t start = 0, off_t size = 0, off_t limit_size = 0) const; // size=0 is checking to end of list
size_t GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list

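This header converts `page_status` from an unscoped enum to an `enum class`, which is why the .cpp hunks above now spell the values as `page_status::LOADED` and so on. A small illustration (not s3fs code) of the difference:

```cpp
// Why the enum class conversion forces qualified names and blocks implicit int conversion.
#include <cstdio>

enum old_status { OLD_LOADED, OLD_MODIFIED };    // unscoped: names leak into this scope
enum class new_status { LOADED, MODIFIED };      // scoped: values must be qualified

int main()
{
    int n = OLD_LOADED;                          // implicitly converts to int
    new_status s = new_status::LOADED;           // qualified name required
    // int m = s;                                // would not compile: no implicit conversion
    if(s == new_status::LOADED){
        std::printf("scoped enum value, old enum as int=%d\n", n);
    }
    return 0;
}
```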
@@ -18,12 +18,11 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include <cstdio>
#include <cstdlib>
#include <algorithm>
#include <cstdlib>
#include <vector>

#include "common.h"
#include "s3fs.h"
#include "s3fs_logger.h"
#include "fdcache_pseudofd.h"
#include "autolock.h"

@@ -34,7 +33,7 @@
// The minimum pseudo fd value starts at 2.
// This is to avoid mistakes for 0(stdin) and 1(stdout), which are usually used.
//
#define MIN_PSEUDOFD_NUMBER 2
static constexpr int MIN_PSEUDOFD_NUMBER = 2;

//------------------------------------------------
// PseudoFdManager class methods

@@ -21,6 +21,8 @@
#ifndef S3FS_FDCACHE_PSEUDOFD_H_
#define S3FS_FDCACHE_PSEUDOFD_H_

#include <vector>

//------------------------------------------------
// Typedefs
//------------------------------------------------
@@ -43,6 +45,10 @@ class PseudoFdManager

PseudoFdManager();
~PseudoFdManager();
PseudoFdManager(const PseudoFdManager&) = delete;
PseudoFdManager(PseudoFdManager&&) = delete;
PseudoFdManager& operator=(const PseudoFdManager&) = delete;
PseudoFdManager& operator=(PseudoFdManager&&) = delete;

int GetUnusedMinPseudoFd() const;
int CreatePseudoFd();

@@ -18,14 +18,12 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include <cstdio>
#include <cstdlib>
#include <cerrno>
#include <unistd.h>
#include <sys/file.h>
#include <sys/stat.h>

#include "common.h"
#include "s3fs.h"
#include "s3fs_logger.h"
#include "fdcache_stat.h"
#include "fdcache.h"
#include "s3fs_util.h"
@@ -50,19 +48,19 @@ std::string CacheFileStat::GetCacheFileStatTopDir()
return top_path;
}

bool CacheFileStat::MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir)
int CacheFileStat::MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir)
{
std::string top_path = CacheFileStat::GetCacheFileStatTopDir();
if(top_path.empty()){
S3FS_PRN_ERR("The path to cache top dir is empty.");
return false;
return -EIO;
}

if(is_create_dir){
int result;
if(0 != (result = mkdirp(top_path + mydirname(path), 0777))){
S3FS_PRN_ERR("failed to create dir(%s) by errno(%d).", path, result);
return false;
return result;
}
}
if(!path || '\0' == path[0]){
@@ -70,7 +68,7 @@ bool CacheFileStat::MakeCacheFileStatPath(const char* path, std::string& sfile_p
}else{
sfile_path = top_path + SAFESTRPTR(path);
}
return true;
return 0;
}

bool CacheFileStat::CheckCacheFileStatTopDir()
@@ -84,26 +82,28 @@ bool CacheFileStat::CheckCacheFileStatTopDir()
return check_exist_dir_permission(top_path.c_str());
}

bool CacheFileStat::DeleteCacheFileStat(const char* path)
int CacheFileStat::DeleteCacheFileStat(const char* path)
{
if(!path || '\0' == path[0]){
return false;
return -EINVAL;
}
// stat path
std::string sfile_path;
if(!CacheFileStat::MakeCacheFileStatPath(path, sfile_path, false)){
int result;
if(0 != (result = CacheFileStat::MakeCacheFileStatPath(path, sfile_path, false))){
S3FS_PRN_ERR("failed to create cache stat file path(%s)", path);
return false;
return result;
}
if(0 != unlink(sfile_path.c_str())){
if(ENOENT == errno){
S3FS_PRN_DBG("failed to delete file(%s): errno=%d", path, errno);
result = -errno;
if(-ENOENT == result){
S3FS_PRN_DBG("failed to delete file(%s): errno=%d", path, result);
}else{
S3FS_PRN_ERR("failed to delete file(%s): errno=%d", path, errno);
S3FS_PRN_ERR("failed to delete file(%s): errno=%d", path, result);
}
return false;
return result;
}
return true;
return 0;
}

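The CacheFileStat hunks above change several bool-returning helpers to return 0 on success and a negative errno value on failure, so callers can see why a call failed instead of just that it failed. A hedged sketch of that convention, using a hypothetical `remove_stat_file` helper rather than the s3fs API:

```cpp
// Sketch of the 0 / negative-errno return convention (hypothetical helper).
#include <cerrno>
#include <cstdio>
#include <unistd.h>

static int remove_stat_file(const char* path)
{
    if(!path || '\0' == path[0]){
        return -EINVAL;     // bad argument, encoded in the return value
    }
    if(0 != unlink(path)){
        return -errno;      // propagate the real reason (e.g. -ENOENT, -EACCES)
    }
    return 0;               // success
}

int main()
{
    int result = remove_stat_file("/tmp/does-not-exist.stat");
    if(0 != result){
        std::printf("failed: errno=%d\n", -result);   // caller can branch on -ENOENT etc.
    }
    return 0;
}
```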
// [NOTE]
@@ -129,7 +129,7 @@ bool CacheFileStat::RenameCacheFileStat(const char* oldpath, const char* newpath
// stat path
std::string old_filestat;
std::string new_filestat;
if(!CacheFileStat::MakeCacheFileStatPath(oldpath, old_filestat, false) || !CacheFileStat::MakeCacheFileStatPath(newpath, new_filestat, false)){
if(0 != CacheFileStat::MakeCacheFileStatPath(oldpath, old_filestat, false) || 0 != CacheFileStat::MakeCacheFileStatPath(newpath, new_filestat, false)){
return false;
}

@@ -203,39 +203,40 @@ bool CacheFileStat::RawOpen(bool readonly)
}
// stat path
std::string sfile_path;
if(!CacheFileStat::MakeCacheFileStatPath(path.c_str(), sfile_path, true)){
if(0 != CacheFileStat::MakeCacheFileStatPath(path.c_str(), sfile_path, true)){
S3FS_PRN_ERR("failed to create cache stat file path(%s)", path.c_str());
return false;
}
// open
int tmpfd;
if(readonly){
if(-1 == (fd = open(sfile_path.c_str(), O_RDONLY))){
if(-1 == (tmpfd = open(sfile_path.c_str(), O_RDONLY))){
S3FS_PRN_ERR("failed to read only open cache stat file path(%s) - errno(%d)", path.c_str(), errno);
return false;
}
}else{
if(-1 == (fd = open(sfile_path.c_str(), O_CREAT|O_RDWR, 0600))){
if(-1 == (tmpfd = open(sfile_path.c_str(), O_CREAT|O_RDWR, 0600))){
S3FS_PRN_ERR("failed to open cache stat file path(%s) - errno(%d)", path.c_str(), errno);
return false;
}
}
scope_guard guard([&]() { close(tmpfd); });

// lock
if(-1 == flock(fd, LOCK_EX)){
if(-1 == flock(tmpfd, LOCK_EX)){
S3FS_PRN_ERR("failed to lock cache stat file(%s) - errno(%d)", path.c_str(), errno);
close(fd);
fd = -1;
return false;
}
// seek top
if(0 != lseek(fd, 0, SEEK_SET)){
if(0 != lseek(tmpfd, 0, SEEK_SET)){
S3FS_PRN_ERR("failed to lseek cache stat file(%s) - errno(%d)", path.c_str(), errno);
flock(fd, LOCK_UN);
close(fd);
fd = -1;
flock(tmpfd, LOCK_UN);
return false;
}
S3FS_PRN_DBG("file locked(%s - %s)", path.c_str(), sfile_path.c_str());

guard.dismiss();
fd = tmpfd;
return true;
}

|
||||
#ifndef S3FS_FDCACHE_STAT_H_
|
||||
#define S3FS_FDCACHE_STAT_H_
|
||||
|
||||
#include <string>
|
||||
|
||||
//------------------------------------------------
|
||||
// CacheFileStat
|
||||
//------------------------------------------------
|
||||
@ -31,18 +33,18 @@ class CacheFileStat
|
||||
int fd;
|
||||
|
||||
private:
|
||||
static bool MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir = true);
|
||||
static int MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir = true);
|
||||
|
||||
bool RawOpen(bool readonly);
|
||||
|
||||
public:
|
||||
static std::string GetCacheFileStatTopDir();
|
||||
static bool DeleteCacheFileStat(const char* path);
|
||||
static int DeleteCacheFileStat(const char* path);
|
||||
static bool CheckCacheFileStatTopDir();
|
||||
static bool DeleteCacheFileStatDirectory();
|
||||
static bool RenameCacheFileStat(const char* oldpath, const char* newpath);
|
||||
|
||||
explicit CacheFileStat(const char* tpath = NULL);
|
||||
explicit CacheFileStat(const char* tpath = nullptr);
|
||||
~CacheFileStat();
|
||||
|
||||
bool Open();
|
||||
|
||||
@@ -18,12 +18,9 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include <cstdio>
#include <cstdlib>
#include <algorithm>

#include "common.h"
#include "s3fs.h"
#include "s3fs_logger.h"
#include "fdcache_untreated.h"
#include "autolock.h"

@@ -91,17 +88,18 @@ bool UntreatedParts::AddPart(off_t start, off_t size)
return true;

}else if((start + size) < iter->start){
// The part to add should be inserted before the current part.
// The part to add should be inserted before the current part.
untreated_list.insert(iter, untreatedpart(start, size, last_tag));
// success to stretch and compress existed parts
return true;
}
}
// There are no overlapping parts in the untreated_list, then add the part at end of list
untreated_list.push_back(untreatedpart(start, size, last_tag));
untreated_list.emplace_back(start, size, last_tag);
return true;
}

bool UntreatedParts::RowGetPart(off_t& start, off_t& size, off_t max_size, off_t min_size, bool lastpart)
bool UntreatedParts::RowGetPart(off_t& start, off_t& size, off_t max_size, off_t min_size, bool lastpart) const
{
if(max_size <= 0 || min_size < 0 || max_size < min_size){
S3FS_PRN_ERR("Parameters are wrong(max_size=%lld, min_size=%lld).", static_cast<long long int>(max_size), static_cast<long long int>(min_size));
@@ -110,7 +108,7 @@ bool UntreatedParts::RowGetPart(off_t& start, off_t& size, off_t max_size, off_t
AutoLock auto_lock(&untreated_list_lock);

// Check the overlap with the existing part and add the part.
for(untreated_list_t::iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
for(untreated_list_t::const_iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
if(!lastpart || iter->untreated_tag == last_tag){
if(min_size <= iter->size){
if(iter->size <= max_size){
@@ -133,90 +131,6 @@ bool UntreatedParts::RowGetPart(off_t& start, off_t& size, off_t max_size, off_t
return false;
}

// [NOTE]
// The part with the last tag cannot be taken out if it has not reached max_size.
//
bool UntreatedParts::TakeoutPart(off_t& start, off_t& size, off_t max_size, off_t min_size)
{
if(max_size <= 0 || min_size < 0 || max_size < min_size){
S3FS_PRN_ERR("Parameters are wrong(max_size=%lld, min_size=%lld).", static_cast<long long int>(max_size), static_cast<long long int>(min_size));
return false;
}
AutoLock auto_lock(&untreated_list_lock);

// Check the overlap with the existing part and add the part.
for(untreated_list_t::iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
if(iter->untreated_tag == last_tag){
// Last updated part
if(max_size <= iter->size){
// Take out only when the maximum part size is exceeded
start = iter->start;
size = max_size;
iter->start = iter->start + max_size;
iter->size = iter->size - max_size;

if(iter->size == 0){
untreated_list.erase(iter);
}
return true;
}
}else{
// Parts updated in the past
if(min_size <= iter->size){
if(iter->size <= max_size){
// Take out the whole part( min <= part size <= max )
start = iter->start;
size = iter->size;
untreated_list.erase(iter);
}else{
// Partially take out part( max < part size )
start = iter->start;
size = max_size;
iter->start = iter->start + max_size;
iter->size = iter->size - max_size;
}
return true;
}
}
}
return false;
}

// [NOTE]
// This method returns the part from the beginning, ignoring conditions
// such as whether it is being updated(the last updated part) or less
// than the minimum size.
//
bool UntreatedParts::TakeoutPartFromBegin(off_t& start, off_t& size, off_t max_size)
{
if(max_size <= 0){
S3FS_PRN_ERR("Parameter is wrong(max_size=%lld).", static_cast<long long int>(max_size));
return false;
}
AutoLock auto_lock(&untreated_list_lock);

if(untreated_list.empty()){
return false;
}

untreated_list_t::iterator iter = untreated_list.begin();
if(iter->size <= max_size){
// Take out the whole part( part size <= max )
start = iter->start;
size = iter->size;

untreated_list.erase(iter);
}else{
// Take out only when the maximum part size is exceeded
start = iter->start;
size = max_size;

iter->start = iter->start + max_size;
iter->size = iter->size - max_size;
}
return true;
}

// [NOTE]
// If size is specified as 0, all areas(parts) after start will be deleted.
//
@@ -251,7 +165,7 @@ bool UntreatedParts::ClearParts(off_t start, off_t size)
}
}else if(start < (iter->start + iter->size)){
// clear area overlaps with iter area(on the end side)
if(0 == size || (iter->start + iter->size) <= (start + size) ){
if(0 == size || (iter->start + iter->size) <= (start + size)){
// start to iter->end is clear
iter->size = start - iter->start;
}else{
@@ -274,6 +188,85 @@ bool UntreatedParts::ClearParts(off_t start, off_t size)
return true;
}

//
// Update the last updated Untreated part
//
bool UntreatedParts::GetLastUpdatePart(off_t& start, off_t& size) const
{
AutoLock auto_lock(&untreated_list_lock);

for(untreated_list_t::const_iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
if(iter->untreated_tag == last_tag){
start = iter->start;
size = iter->size;
return true;
}
}
return false;
}

//
// Replaces the last updated Untreated part.
//
// [NOTE]
// If size <= 0, delete that part
//
bool UntreatedParts::ReplaceLastUpdatePart(off_t start, off_t size)
{
AutoLock auto_lock(&untreated_list_lock);

for(untreated_list_t::iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
if(iter->untreated_tag == last_tag){
if(0 < size){
iter->start = start;
iter->size = size;
}else{
iter = untreated_list.erase(iter);
}
return true;
}
}
return false;
}

//
// Remove the last updated Untreated part.
//
bool UntreatedParts::RemoveLastUpdatePart()
{
AutoLock auto_lock(&untreated_list_lock);

for(untreated_list_t::iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
if(iter->untreated_tag == last_tag){
untreated_list.erase(iter);
return true;
}
}
return false;
}

//
// Duplicate the internal untreated_list.
//
bool UntreatedParts::Duplicate(untreated_list_t& list)
{
AutoLock auto_lock(&untreated_list_lock);

list = untreated_list;
return true;
}

void UntreatedParts::Dump()
{
AutoLock auto_lock(&untreated_list_lock);

S3FS_PRN_DBG("untreated list = [");
for(untreated_list_t::const_iterator iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
S3FS_PRN_DBG(" {%014lld - %014lld : tag=%ld}", static_cast<long long int>(iter->start), static_cast<long long int>(iter->size), iter->untreated_tag);
}
S3FS_PRN_DBG("]");
}

/*
* Local variables:
* tab-width: 4

@ -30,35 +30,38 @@
class UntreatedParts
{
    private:
        pthread_mutex_t untreated_list_lock; // protects untreated_list
        mutable pthread_mutex_t untreated_list_lock; // protects untreated_list
        bool is_lock_init;

        untreated_list_t untreated_list;
        long last_tag; // [NOTE] Use this to identify the latest updated part.

    private:
        bool RowGetPart(off_t& start, off_t& size, off_t max_size, off_t min_size, bool lastpart);
        bool RowGetPart(off_t& start, off_t& size, off_t max_size, off_t min_size, bool lastpart) const;

    public:
        UntreatedParts();
        ~UntreatedParts();
        UntreatedParts(const UntreatedParts&) = delete;
        UntreatedParts(UntreatedParts&&) = delete;
        UntreatedParts& operator=(const UntreatedParts&) = delete;
        UntreatedParts& operator=(UntreatedParts&&) = delete;

        bool empty();

        bool AddPart(off_t start, off_t size);

        // [NOTE]
        // The following method does not return parts smaller than mini_size.
        // You can avoid it by setting min_size to 0.
        //
        bool GetPart(off_t& start, off_t& size, off_t max_size, off_t min_size = MIN_MULTIPART_SIZE) { return RowGetPart(start, size, max_size, min_size, false); }
        bool GetLastUpdatedPart(off_t& start, off_t& size, off_t max_size, off_t min_size = MIN_MULTIPART_SIZE) { return RowGetPart(start, size, max_size, min_size, true); }

        bool TakeoutPart(off_t& start, off_t& size, off_t max_size, off_t min_size = MIN_MULTIPART_SIZE);
        bool TakeoutPartFromBegin(off_t& start, off_t& size, off_t max_size);
        bool GetLastUpdatedPart(off_t& start, off_t& size, off_t max_size, off_t min_size = MIN_MULTIPART_SIZE) const { return RowGetPart(start, size, max_size, min_size, true); }

        bool ClearParts(off_t start, off_t size);
        bool ClearAll() { return ClearParts(0, 0); }

        bool GetLastUpdatePart(off_t& start, off_t& size) const;
        bool ReplaceLastUpdatePart(off_t start, off_t size);
        bool RemoveLastUpdatePart();

        bool Duplicate(untreated_list_t& list);

        void Dump();
};

#endif // S3FS_FDCACHE_UNTREATED_H_
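Based only on the declarations shown above, a caller might drive this class roughly as follows; the offsets, sizes and the 10MiB part cap are illustrative, not taken from the diff.

    // Hypothetical caller-side sketch; only the public API above is assumed.
    void upload_pending_areas(UntreatedParts& untreated)
    {
        off_t start = 0;
        off_t size  = 0;

        // Record a freshly written, not-yet-uploaded range (values are examples).
        untreated.AddPart(0, 25 * 1024 * 1024);

        // Drain the list one part at a time, capping each part at 10MiB;
        // TakeoutPartFromBegin splits oversized parts as shown in the .cpp above.
        while(untreated.TakeoutPartFromBegin(start, size, 10 * 1024 * 1024)){
            // ... upload [start, start + size) as one multipart part ...
        }
    }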
@ -18,15 +18,15 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <errno.h>
|
||||
#include <cerrno>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <pthread.h>
|
||||
#include <unistd.h>
|
||||
#include <syslog.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <string.h>
|
||||
#include <gcrypt.h>
|
||||
#include <gnutls/gnutls.h>
|
||||
#include <gnutls/crypto.h>
|
||||
@ -41,6 +41,7 @@
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_auth.h"
|
||||
#include "s3fs_logger.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for version
|
||||
@ -49,7 +50,7 @@
|
||||
|
||||
const char* s3fs_crypt_lib_name(void)
|
||||
{
|
||||
static const char version[] = "GnuTLS(nettle)";
|
||||
static constexpr char version[] = "GnuTLS(nettle)";
|
||||
|
||||
return version;
|
||||
}
|
||||
@ -58,7 +59,7 @@ const char* s3fs_crypt_lib_name(void)
|
||||
|
||||
const char* s3fs_crypt_lib_name()
|
||||
{
|
||||
static const char version[] = "GnuTLS(gcrypt)";
|
||||
static constexpr char version[] = "GnuTLS(gcrypt)";
|
||||
|
||||
return version;
|
||||
}
|
||||
@ -74,7 +75,7 @@ bool s3fs_init_global_ssl()
|
||||
return false;
|
||||
}
|
||||
#ifndef USE_GNUTLS_NETTLE
|
||||
if(NULL == gcry_check_version(NULL)){
|
||||
if(nullptr == gcry_check_version(nullptr)){
|
||||
return false;
|
||||
}
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
@ -105,76 +106,72 @@ bool s3fs_destroy_crypt_mutex()
|
||||
//-------------------------------------------------------------------
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
|
||||
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
if(!key || !data || !digestlen){
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
*digest = new unsigned char[SHA1_DIGEST_SIZE];
|
||||
std::unique_ptr<unsigned char[]> digest(new unsigned char[SHA1_DIGEST_SIZE]);
|
||||
|
||||
struct hmac_sha1_ctx ctx_hmac;
|
||||
hmac_sha1_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
|
||||
hmac_sha1_update(&ctx_hmac, datalen, reinterpret_cast<const uint8_t*>(data));
|
||||
hmac_sha1_digest(&ctx_hmac, SHA1_DIGEST_SIZE, reinterpret_cast<uint8_t*>(*digest));
|
||||
hmac_sha1_digest(&ctx_hmac, SHA1_DIGEST_SIZE, reinterpret_cast<uint8_t*>(digest.get()));
|
||||
*digestlen = SHA1_DIGEST_SIZE;
|
||||
|
||||
return true;
|
||||
return digest;
|
||||
}
|
||||
|
||||
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
if(!key || !data || !digestlen){
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
*digest = new unsigned char[SHA256_DIGEST_SIZE];
|
||||
std::unique_ptr<unsigned char[]> digest(new unsigned char[SHA256_DIGEST_SIZE]);
|
||||
|
||||
struct hmac_sha256_ctx ctx_hmac;
|
||||
hmac_sha256_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
|
||||
hmac_sha256_update(&ctx_hmac, datalen, reinterpret_cast<const uint8_t*>(data));
|
||||
hmac_sha256_digest(&ctx_hmac, SHA256_DIGEST_SIZE, reinterpret_cast<uint8_t*>(*digest));
|
||||
hmac_sha256_digest(&ctx_hmac, SHA256_DIGEST_SIZE, reinterpret_cast<uint8_t*>(digest.get()));
|
||||
*digestlen = SHA256_DIGEST_SIZE;
|
||||
|
||||
return true;
|
||||
return digest;
|
||||
}
|
||||
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
|
||||
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
if(!key || !data || !digestlen){
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){
|
||||
return false;
|
||||
return nullptr;
|
||||
}
|
||||
*digest = new unsigned char[*digestlen + 1];
|
||||
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA1, key, keylen, data, datalen, *digest)){
|
||||
delete[] *digest;
|
||||
*digest = NULL;
|
||||
return false;
|
||||
std::unique_ptr<unsigned char[]> digest(new unsigned char[*digestlen + 1]);
|
||||
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA1, key, keylen, data, datalen, digest.get())){
|
||||
return nullptr;
|
||||
}
|
||||
return true;
|
||||
return digest;
|
||||
}
|
||||
|
||||
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
if(!key || !data || !digestlen){
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA256))){
|
||||
return false;
|
||||
return nullptr;
|
||||
}
|
||||
*digest = new unsigned char[*digestlen + 1];
|
||||
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA256, key, keylen, data, datalen, *digest)){
|
||||
delete[] *digest;
|
||||
*digest = NULL;
|
||||
return false;
|
||||
std::unique_ptr<unsigned char[]> digest(new unsigned char[*digestlen + 1]);
|
||||
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA256, key, keylen, data, datalen, digest.get())){
|
||||
return nullptr;
|
||||
}
|
||||
return true;
|
||||
return digest;
|
||||
}
|
||||
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
@ -182,22 +179,26 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_md5_digest_length()
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* result)
|
||||
{
|
||||
return 16;
|
||||
struct md5_ctx ctx_md5;
|
||||
md5_init(&ctx_md5);
|
||||
md5_update(&ctx_md5, datalen, data);
|
||||
md5_digest(&ctx_md5, result->size(), result->data());
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
|
||||
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
|
||||
{
|
||||
struct md5_ctx ctx_md5;
|
||||
off_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
return false;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
@ -215,36 +216,48 @@ unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return NULL;
|
||||
return false;
|
||||
}
|
||||
md5_update(&ctx_md5, bytes, buf);
|
||||
}
|
||||
result = new unsigned char[get_md5_digest_length()];
|
||||
md5_digest(&ctx_md5, get_md5_digest_length(), result);
|
||||
md5_digest(&ctx_md5, result->size(), result->data());
|
||||
|
||||
return result;
|
||||
return true;
|
||||
}
|
||||
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
|
||||
unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
|
||||
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* digest)
|
||||
{
|
||||
gcry_md_hd_t ctx_md5;
|
||||
gcry_error_t err;
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){
|
||||
S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
return false;
|
||||
}
|
||||
gcry_md_write(ctx_md5, digest->data(), digest->size());
|
||||
gcry_md_close(ctx_md5);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
|
||||
{
|
||||
gcry_md_hd_t ctx_md5;
|
||||
gcry_error_t err;
|
||||
off_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
return false;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){
|
||||
S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
return NULL;
|
||||
return false;
|
||||
}
|
||||
|
||||
for(off_t total = 0; total < size; total += bytes){
|
||||
@ -259,15 +272,14 @@ unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
gcry_md_close(ctx_md5);
|
||||
return NULL;
|
||||
return false;
|
||||
}
|
||||
gcry_md_write(ctx_md5, buf, bytes);
|
||||
}
|
||||
result = new unsigned char[get_md5_digest_length()];
|
||||
memcpy(result, gcry_md_read(ctx_md5, 0), get_md5_digest_length());
|
||||
memcpy(result->data(), gcry_md_read(ctx_md5, 0), result->size());
|
||||
gcry_md_close(ctx_md5);
|
||||
|
||||
return result;
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
@ -275,30 +287,21 @@ unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for SHA256
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_sha256_digest_length()
|
||||
{
|
||||
return 32;
|
||||
}
|
||||
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
bool s3fs_sha256(const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
bool s3fs_sha256(const unsigned char* data, size_t datalen, sha256_t* digest)
|
||||
{
|
||||
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
*digest = new unsigned char[*digestlen];
|
||||
|
||||
struct sha256_ctx ctx_sha256;
|
||||
sha256_init(&ctx_sha256);
|
||||
sha256_update(&ctx_sha256, datalen, data);
|
||||
sha256_digest(&ctx_sha256, *digestlen, *digest);
|
||||
sha256_digest(&ctx_sha256, digest->size(), digest->data());
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
|
||||
bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
|
||||
{
|
||||
struct sha256_ctx ctx_sha256;
|
||||
off_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
sha256_init(&ctx_sha256);
|
||||
|
||||
@ -313,55 +316,49 @@ unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return NULL;
|
||||
return false;
|
||||
}
|
||||
sha256_update(&ctx_sha256, bytes, buf);
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
sha256_digest(&ctx_sha256, get_sha256_digest_length(), result);
|
||||
sha256_digest(&ctx_sha256, result->size(), result->data());
|
||||
|
||||
return result;
|
||||
return true;
|
||||
}
|
||||
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
|
||||
bool s3fs_sha256(const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
bool s3fs_sha256(const unsigned char* data, size_t datalen, sha256_t* digest)
|
||||
{
|
||||
size_t len = (*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
*digest = new unsigned char[len];
|
||||
|
||||
gcry_md_hd_t ctx_sha256;
|
||||
gcry_error_t err;
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
|
||||
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
delete[] *digest;
|
||||
return false;
|
||||
}
|
||||
gcry_md_write(ctx_sha256, data, datalen);
|
||||
memcpy(*digest, gcry_md_read(ctx_sha256, 0), *digestlen);
|
||||
memcpy(digest->data(), gcry_md_read(ctx_sha256, 0), digest->size());
|
||||
gcry_md_close(ctx_sha256);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
|
||||
bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
|
||||
{
|
||||
gcry_md_hd_t ctx_sha256;
|
||||
gcry_error_t err;
|
||||
off_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
return false;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
|
||||
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
return NULL;
|
||||
return false;
|
||||
}
|
||||
|
||||
for(off_t total = 0; total < size; total += bytes){
|
||||
@ -376,15 +373,14 @@ unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
gcry_md_close(ctx_sha256);
|
||||
return NULL;
|
||||
return false;
|
||||
}
|
||||
gcry_md_write(ctx_sha256, buf, bytes);
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
memcpy(result, gcry_md_read(ctx_sha256, 0), get_sha256_digest_length());
|
||||
memcpy(result->data(), gcry_md_read(ctx_sha256, 0), result->size());
|
||||
gcry_md_close(ctx_sha256);
|
||||
|
||||
return result;
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
|
||||
@ -18,16 +18,17 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <ctime>
|
||||
#include <unistd.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "metaheader.h"
|
||||
#include "string_util.h"
|
||||
|
||||
static constexpr struct timespec DEFAULT_TIMESPEC = {-1, 0};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility functions for convert
|
||||
//-------------------------------------------------------------------
|
||||
@ -49,14 +50,15 @@ static struct timespec cvt_string_to_time(const char *str)
|
||||
strmtime.erase(pos);
|
||||
}
|
||||
}
|
||||
return {static_cast<time_t>(cvt_strtoofft(strmtime.c_str(), /*base=*/ 10)), nsec};
|
||||
struct timespec ts = {static_cast<time_t>(cvt_strtoofft(strmtime.c_str(), /*base=*/ 10)), nsec};
|
||||
return ts;
|
||||
}
|
||||
|
||||
static struct timespec get_time(const headers_t& meta, const char *header)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
if(meta.end() == (iter = meta.find(header))){
|
||||
return {-1, 0};
|
||||
return DEFAULT_TIMESPEC;
|
||||
}
|
||||
return cvt_string_to_time((*iter).second.c_str());
|
||||
}
|
||||
@ -72,9 +74,10 @@ struct timespec get_mtime(const headers_t& meta, bool overcheck)
|
||||
return t;
|
||||
}
|
||||
if(overcheck){
|
||||
return {get_lastmodified(meta), 0};
|
||||
struct timespec ts = {get_lastmodified(meta), 0};
|
||||
return ts;
|
||||
}
|
||||
return {-1, 0};
|
||||
return DEFAULT_TIMESPEC;
|
||||
}
|
||||
|
||||
struct timespec get_ctime(const headers_t& meta, bool overcheck)
|
||||
@ -84,9 +87,10 @@ struct timespec get_ctime(const headers_t& meta, bool overcheck)
|
||||
return t;
|
||||
}
|
||||
if(overcheck){
|
||||
return {get_lastmodified(meta), 0};
|
||||
struct timespec ts = {get_lastmodified(meta), 0};
|
||||
return ts;
|
||||
}
|
||||
return {-1, 0};
|
||||
return DEFAULT_TIMESPEC;
|
||||
}
|
||||
|
||||
struct timespec get_atime(const headers_t& meta, bool overcheck)
|
||||
@ -96,9 +100,10 @@ struct timespec get_atime(const headers_t& meta, bool overcheck)
|
||||
return t;
|
||||
}
|
||||
if(overcheck){
|
||||
return {get_lastmodified(meta), 0};
|
||||
struct timespec ts = {get_lastmodified(meta), 0};
|
||||
return ts;
|
||||
}
|
||||
return {-1, 0};
|
||||
return DEFAULT_TIMESPEC;
|
||||
}
|
||||
|
||||
off_t get_size(const char *s)
|
||||
@ -120,7 +125,7 @@ mode_t get_mode(const char *s, int base)
|
||||
return static_cast<mode_t>(cvt_strtoofft(s, base));
|
||||
}
|
||||
|
||||
mode_t get_mode(const headers_t& meta, const char* path, bool checkdir, bool forcedir)
|
||||
mode_t get_mode(const headers_t& meta, const std::string& strpath, bool checkdir, bool forcedir)
|
||||
{
|
||||
mode_t mode = 0;
|
||||
bool isS3sync = false;
|
||||
@ -136,7 +141,7 @@ mode_t get_mode(const headers_t& meta, const char* path, bool checkdir, bool for
|
||||
}else{
|
||||
// If another tool creates an object without permissions, default to owner
|
||||
// read-write and group readable.
|
||||
mode = path[strlen(path) - 1] == '/' ? 0750 : 0640;
|
||||
mode = (!strpath.empty() && '/' == *strpath.rbegin()) ? 0750 : 0640;
|
||||
}
|
||||
|
||||
// Checking the bitmask, if the last 3 bits are all zero then process as a regular
|
||||
@ -158,7 +163,7 @@ mode_t get_mode(const headers_t& meta, const char* path, bool checkdir, bool for
|
||||
if(strConType == "application/x-directory" || strConType == "httpd/unix-directory"){
|
||||
// Nextcloud uses this MIME type for directory objects when mounting bucket as external Storage
|
||||
mode |= S_IFDIR;
|
||||
}else if(path && 0 < strlen(path) && '/' == path[strlen(path) - 1]){
|
||||
}else if(!strpath.empty() && '/' == *strpath.rbegin()){
|
||||
if(strConType == "binary/octet-stream" || strConType == "application/octet-stream"){
|
||||
mode |= S_IFDIR;
|
||||
}else{
|
||||
@ -240,7 +245,7 @@ gid_t get_gid(const headers_t& meta)

blkcnt_t get_blocks(off_t size)
{
    return size / 512 + 1;
    return (size / 512) + (0 == (size % 512) ? 0 : 1);
}

time_t cvtIAMExpireStringToTime(const char* s)
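A quick check of what the new block-count rounding does compared to the old formula; the sizes are chosen only to illustrate the boundary cases.

    #include <cassert>
    #include <sys/types.h>

    static blkcnt_t old_get_blocks(off_t size){ return size / 512 + 1; }
    static blkcnt_t new_get_blocks(off_t size){ return (size / 512) + (0 == (size % 512) ? 0 : 1); }

    int main()
    {
        // Exactly two 512-byte blocks: the old formula over-counted by one.
        assert(old_get_blocks(1024) == 3);
        assert(new_get_blocks(1024) == 2);

        // One byte into the third block: both report three blocks.
        assert(old_get_blocks(1025) == 3);
        assert(new_get_blocks(1025) == 3);

        // Empty file: old reported one block, new reports zero.
        assert(old_get_blocks(0) == 1);
        assert(new_get_blocks(0) == 0);
        return 0;
    }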
@ -22,13 +22,13 @@
#define S3FS_METAHEADER_H_

#include <string>
#include <strings.h>
#include <map>
#include <list>

//-------------------------------------------------------------------
// headers_t
//-------------------------------------------------------------------
struct header_nocase_cmp : public std::binary_function<std::string, std::string, bool>
struct header_nocase_cmp
{
    bool operator()(const std::string &strleft, const std::string &strright) const
    {
@ -46,7 +46,7 @@ struct timespec get_atime(const headers_t& meta, bool overcheck = true);
off_t get_size(const char *s);
off_t get_size(const headers_t& meta);
mode_t get_mode(const char *s, int base = 0);
mode_t get_mode(const headers_t& meta, const char* path = NULL, bool checkdir = false, bool forcedir = false);
mode_t get_mode(const headers_t& meta, const std::string& strpath, bool checkdir = false, bool forcedir = false);
uid_t get_uid(const char *s);
uid_t get_uid(const headers_t& meta);
gid_t get_gid(const char *s);

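The header_nocase_cmp change drops the std::binary_function base class, which was deprecated in C++11 and removed in C++17; a map comparator only needs a const operator(). A hedged sketch of the idea (the map typedef here is an assumption about how the project uses the comparator, not a quote of the header):

    #include <map>
    #include <string>
    #include <strings.h>   // strcasecmp

    // Assumed shape after the change above: no base class, just a call operator.
    struct header_nocase_cmp_sketch
    {
        bool operator()(const std::string& strleft, const std::string& strright) const
        {
            return strcasecmp(strleft.c_str(), strright.c_str()) < 0;
        }
    };

    // Hypothetical case-insensitive header map in the spirit of headers_t.
    typedef std::map<std::string, std::string, header_nocase_cmp_sketch> headers_sketch_t;

    int main()
    {
        headers_sketch_t headers;
        headers["Content-Type"] = "text/plain";
        // Lookup succeeds regardless of case because the comparator ignores it.
        return headers.count("content-type") == 1 ? 0 : 1;
    }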
@ -21,8 +21,8 @@
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_logger.h"
|
||||
#include "mpu_util.h"
|
||||
#include "curl.h"
|
||||
#include "s3fs_xml.h"
|
||||
@ -32,12 +32,12 @@
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
//-------------------------------------------------------------------
|
||||
utility_incomp_type utility_mode = NO_UTILITY_MODE;
|
||||
utility_incomp_type utility_mode = utility_incomp_type::NO_UTILITY_MODE;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
static void print_incomp_mpu_list(incomp_mpu_list_t& list)
|
||||
static void print_incomp_mpu_list(const incomp_mpu_list_t& list)
|
||||
{
|
||||
printf("\n");
|
||||
printf("Lists the parts that have been uploaded for a specific multipart upload.\n");
|
||||
@ -47,7 +47,7 @@ static void print_incomp_mpu_list(incomp_mpu_list_t& list)
|
||||
printf("---------------------------------------------------------------\n");
|
||||
|
||||
int cnt = 0;
|
||||
for(incomp_mpu_list_t::iterator iter = list.begin(); iter != list.end(); ++iter, ++cnt){
|
||||
for(incomp_mpu_list_t::const_iterator iter = list.begin(); iter != list.end(); ++iter, ++cnt){
|
||||
printf(" Path : %s\n", (*iter).key.c_str());
|
||||
printf(" UploadId : %s\n", (*iter).id.c_str());
|
||||
printf(" Date : %s\n", (*iter).date.c_str());
|
||||
@ -60,17 +60,17 @@ static void print_incomp_mpu_list(incomp_mpu_list_t& list)
|
||||
}
|
||||
}
|
||||
|
||||
static bool abort_incomp_mpu_list(incomp_mpu_list_t& list, time_t abort_time)
|
||||
static bool abort_incomp_mpu_list(const incomp_mpu_list_t& list, time_t abort_time)
|
||||
{
|
||||
if(list.empty()){
|
||||
return true;
|
||||
}
|
||||
time_t now_time = time(NULL);
|
||||
time_t now_time = time(nullptr);
|
||||
|
||||
// do removing.
|
||||
S3fsCurl s3fscurl;
|
||||
bool result = true;
|
||||
for(incomp_mpu_list_t::iterator iter = list.begin(); iter != list.end(); ++iter){
|
||||
for(incomp_mpu_list_t::const_iterator iter = list.begin(); iter != list.end(); ++iter){
|
||||
const char* tpath = (*iter).key.c_str();
|
||||
std::string upload_id = (*iter).id;
|
||||
|
||||
@ -100,7 +100,7 @@ static bool abort_incomp_mpu_list(incomp_mpu_list_t& list, time_t abort_time)
|
||||
|
||||
int s3fs_utility_processing(time_t abort_time)
|
||||
{
|
||||
if(NO_UTILITY_MODE == utility_mode){
|
||||
if(utility_incomp_type::NO_UTILITY_MODE == utility_mode){
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
printf("\n*** s3fs run as utility mode.\n\n");
|
||||
@ -116,7 +116,7 @@ int s3fs_utility_processing(time_t abort_time)
|
||||
S3FS_PRN_DBG("response body = {\n%s\n}", body.c_str());
|
||||
|
||||
xmlDocPtr doc;
|
||||
if(NULL == (doc = xmlReadMemory(body.c_str(), static_cast<int>(body.size()), "", NULL, 0))){
|
||||
if(nullptr == (doc = xmlReadMemory(body.c_str(), static_cast<int>(body.size()), "", nullptr, 0))){
|
||||
S3FS_PRN_DBG("xmlReadMemory exited with error.");
|
||||
result = EXIT_FAILURE;
|
||||
|
||||
@ -128,10 +128,10 @@ int s3fs_utility_processing(time_t abort_time)
|
||||
result = EXIT_FAILURE;
|
||||
|
||||
}else{
|
||||
if(INCOMP_TYPE_LIST == utility_mode){
|
||||
if(utility_incomp_type::INCOMP_TYPE_LIST == utility_mode){
|
||||
// print list
|
||||
print_incomp_mpu_list(list);
|
||||
}else if(INCOMP_TYPE_ABORT == utility_mode){
|
||||
}else if(utility_incomp_type::INCOMP_TYPE_ABORT == utility_mode){
|
||||
// remove
|
||||
if(!abort_incomp_mpu_list(list, abort_time)){
|
||||
S3FS_PRN_DBG("an error occurred during removal process.");
|
||||
|
||||
@ -22,7 +22,7 @@
#define S3FS_MPU_UTIL_H_

#include <string>
#include <list>
#include <vector>

//-------------------------------------------------------------------
// Structure / Typedef
@ -34,12 +34,12 @@ typedef struct incomplete_multipart_upload_info
    std::string date;
}INCOMP_MPU_INFO;

typedef std::list<INCOMP_MPU_INFO> incomp_mpu_list_t;
typedef std::vector<INCOMP_MPU_INFO> incomp_mpu_list_t;

//-------------------------------------------------------------------
// enum for utility process mode
//-------------------------------------------------------------------
enum utility_incomp_type{
enum class utility_incomp_type{
    NO_UTILITY_MODE = 0, // not utility mode
    INCOMP_TYPE_LIST, // list of incomplete mpu
    INCOMP_TYPE_ABORT // delete incomplete mpu

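Switching to enum class scopes the enumerators, which is why the mpu_util.cpp hunks above now spell out names such as utility_incomp_type::INCOMP_TYPE_LIST. A tiny sketch of the difference, using a stand-in enum name rather than the real one:

    // Unscoped enum (before): enumerators leak into the enclosing namespace,
    //   utility_incomp_type mode = INCOMP_TYPE_LIST;
    // Scoped enum (after): the enumerator must be qualified and does not
    // implicitly convert to int.
    enum class utility_incomp_type_sketch {
        NO_UTILITY_MODE = 0,
        INCOMP_TYPE_LIST,
        INCOMP_TYPE_ABORT
    };

    int main()
    {
        utility_incomp_type_sketch mode = utility_incomp_type_sketch::INCOMP_TYPE_LIST;
        return (utility_incomp_type_sketch::NO_UTILITY_MODE == mode) ? 1 : 0;
    }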
139
src/mvnode.cpp
@ -1,139 +0,0 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "mvnode.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility functions for moving objects
|
||||
//-------------------------------------------------------------------
|
||||
MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, bool normdir)
|
||||
{
|
||||
MVNODE *p;
|
||||
char *p_old_path;
|
||||
char *p_new_path;
|
||||
|
||||
if(NULL == (p_old_path = strdup(old_path))){
|
||||
printf("create_mvnode: could not allocation memory for p_old_path\n");
|
||||
S3FS_FUSE_EXIT();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if(NULL == (p_new_path = strdup(new_path))){
|
||||
free(p_old_path);
|
||||
printf("create_mvnode: could not allocation memory for p_new_path\n");
|
||||
S3FS_FUSE_EXIT();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
p = new MVNODE();
|
||||
p->old_path = p_old_path;
|
||||
p->new_path = p_new_path;
|
||||
p->is_dir = is_dir;
|
||||
p->is_normdir = normdir;
|
||||
p->prev = NULL;
|
||||
p->next = NULL;
|
||||
return p;
|
||||
}
|
||||
|
||||
//
|
||||
// Add sorted MVNODE data(Ascending order)
|
||||
//
|
||||
MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir)
|
||||
{
|
||||
if(!head || !tail){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
MVNODE* cur;
|
||||
MVNODE* mvnew;
|
||||
for(cur = *head; cur; cur = cur->next){
|
||||
if(cur->is_dir == is_dir){
|
||||
int nResult = strcmp(cur->old_path, old_path);
|
||||
if(0 == nResult){
|
||||
// Found same old_path.
|
||||
return cur;
|
||||
|
||||
}else if(0 > nResult){
|
||||
// next check.
|
||||
// ex: cur("abc"), mvnew("abcd")
|
||||
// ex: cur("abc"), mvnew("abd")
|
||||
continue;
|
||||
|
||||
}else{
|
||||
// Add into before cur-pos.
|
||||
// ex: cur("abc"), mvnew("ab")
|
||||
// ex: cur("abc"), mvnew("abb")
|
||||
if(NULL == (mvnew = create_mvnode(old_path, new_path, is_dir, normdir))){
|
||||
return NULL;
|
||||
}
|
||||
if(cur->prev){
|
||||
(cur->prev)->next = mvnew;
|
||||
}else{
|
||||
*head = mvnew;
|
||||
}
|
||||
mvnew->prev = cur->prev;
|
||||
mvnew->next = cur;
|
||||
cur->prev = mvnew;
|
||||
|
||||
return mvnew;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Add into tail.
|
||||
if(NULL == (mvnew = create_mvnode(old_path, new_path, is_dir, normdir))){
|
||||
return NULL;
|
||||
}
|
||||
mvnew->prev = (*tail);
|
||||
if(*tail){
|
||||
(*tail)->next = mvnew;
|
||||
}
|
||||
(*tail) = mvnew;
|
||||
if(!(*head)){
|
||||
(*head) = mvnew;
|
||||
}
|
||||
return mvnew;
|
||||
}
|
||||
|
||||
void free_mvnodes(MVNODE *head)
|
||||
{
|
||||
MVNODE *my_head;
|
||||
MVNODE *next;
|
||||
|
||||
for(my_head = head, next = NULL; my_head; my_head = next){
|
||||
next = my_head->next;
|
||||
free(my_head->old_path);
|
||||
free(my_head->new_path);
|
||||
delete my_head;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
53
src/mvnode.h
@ -1,53 +0,0 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_MVNODE_H_
|
||||
#define S3FS_MVNODE_H_
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Structure
|
||||
//-------------------------------------------------------------------
|
||||
typedef struct mvnode
|
||||
{
|
||||
char* old_path;
|
||||
char* new_path;
|
||||
bool is_dir;
|
||||
bool is_normdir;
|
||||
struct mvnode* prev;
|
||||
struct mvnode* next;
|
||||
} MVNODE;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility functions for moving objects
|
||||
//-------------------------------------------------------------------
|
||||
MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, bool normdir = false);
|
||||
MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir = false);
|
||||
void free_mvnodes(MVNODE *head);
|
||||
|
||||
#endif // S3FS_MVNODE_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
@ -18,15 +18,15 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <errno.h>
|
||||
#include <cerrno>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <pthread.h>
|
||||
#include <unistd.h>
|
||||
#include <syslog.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <string.h>
|
||||
#include <nss.h>
|
||||
#include <pk11pub.h>
|
||||
#include <hasht.h>
|
||||
@ -37,13 +37,14 @@
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_auth.h"
|
||||
#include "s3fs_logger.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for version
|
||||
//-------------------------------------------------------------------
|
||||
const char* s3fs_crypt_lib_name()
|
||||
{
|
||||
static const char version[] = "NSS";
|
||||
static constexpr char version[] = "NSS";
|
||||
|
||||
return version;
|
||||
}
|
||||
@ -55,7 +56,7 @@ bool s3fs_init_global_ssl()
|
||||
{
|
||||
PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);
|
||||
|
||||
if(SECSuccess != NSS_NoDB_Init(NULL)){
|
||||
if(SECSuccess != NSS_NoDB_Init(nullptr)){
|
||||
S3FS_PRN_ERR("Failed NSS_NoDB_Init call.");
|
||||
return false;
|
||||
}
|
||||
@ -86,10 +87,10 @@ bool s3fs_destroy_crypt_mutex()
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for HMAC
|
||||
//-------------------------------------------------------------------
|
||||
static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256)
|
||||
static std::unique_ptr<unsigned char[]> s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen, bool is_sha256)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
if(!key || !data || !digestlen){
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
PK11SlotInfo* Slot;
|
||||
@ -97,19 +98,19 @@ static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* d
|
||||
PK11Context* Context;
|
||||
unsigned char tmpdigest[64];
|
||||
SECItem KeySecItem = {siBuffer, reinterpret_cast<unsigned char*>(const_cast<void*>(key)), static_cast<unsigned int>(keylen)};
|
||||
SECItem NullSecItem = {siBuffer, NULL, 0};
|
||||
SECItem NullSecItem = {siBuffer, nullptr, 0};
|
||||
|
||||
if(NULL == (Slot = PK11_GetInternalKeySlot())){
|
||||
return false;
|
||||
if(nullptr == (Slot = PK11_GetInternalKeySlot())){
|
||||
return nullptr;
|
||||
}
|
||||
if(NULL == (pKey = PK11_ImportSymKey(Slot, (is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), PK11_OriginUnwrap, CKA_SIGN, &KeySecItem, NULL))){
|
||||
if(nullptr == (pKey = PK11_ImportSymKey(Slot, (is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), PK11_OriginUnwrap, CKA_SIGN, &KeySecItem, nullptr))){
|
||||
PK11_FreeSlot(Slot);
|
||||
return false;
|
||||
return nullptr;
|
||||
}
|
||||
if(NULL == (Context = PK11_CreateContextBySymKey((is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), CKA_SIGN, pKey, &NullSecItem))){
|
||||
if(nullptr == (Context = PK11_CreateContextBySymKey((is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), CKA_SIGN, pKey, &NullSecItem))){
|
||||
PK11_FreeSymKey(pKey);
|
||||
PK11_FreeSlot(Slot);
|
||||
return false;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
*digestlen = 0;
|
||||
@ -120,47 +121,54 @@ static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* d
|
||||
PK11_DestroyContext(Context, PR_TRUE);
|
||||
PK11_FreeSymKey(pKey);
|
||||
PK11_FreeSlot(Slot);
|
||||
return false;
|
||||
return nullptr;
|
||||
}
|
||||
PK11_DestroyContext(Context, PR_TRUE);
|
||||
PK11_FreeSymKey(pKey);
|
||||
PK11_FreeSlot(Slot);
|
||||
|
||||
*digest = new unsigned char[*digestlen];
|
||||
memcpy(*digest, tmpdigest, *digestlen);
|
||||
std::unique_ptr<unsigned char[]> digest(new unsigned char[*digestlen]);
|
||||
memcpy(digest.get(), tmpdigest, *digestlen);
|
||||
|
||||
return true;
|
||||
return digest;
|
||||
}
|
||||
|
||||
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
|
||||
{
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false);
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digestlen, false);
|
||||
}
|
||||
|
||||
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
|
||||
{
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true);
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digestlen, true);
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_md5_digest_length()
|
||||
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* result)
|
||||
{
|
||||
return MD5_LENGTH;
|
||||
PK11Context* md5ctx;
|
||||
unsigned int md5outlen;
|
||||
md5ctx = PK11_CreateDigestContext(SEC_OID_MD5);
|
||||
|
||||
PK11_DigestOp(md5ctx, data, datalen);
|
||||
PK11_DigestFinal(md5ctx, result->data(), &md5outlen, result->size());
|
||||
PK11_DestroyContext(md5ctx, PR_TRUE);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
|
||||
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
|
||||
{
|
||||
PK11Context* md5ctx;
|
||||
off_t bytes;
|
||||
unsigned char* result;
|
||||
unsigned int md5outlen;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
return false;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
@ -179,53 +187,42 @@ unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
PK11_DestroyContext(md5ctx, PR_TRUE);
|
||||
return NULL;
|
||||
return false;
|
||||
}
|
||||
PK11_DigestOp(md5ctx, buf, bytes);
|
||||
}
|
||||
result = new unsigned char[get_md5_digest_length()];
|
||||
PK11_DigestFinal(md5ctx, result, &md5outlen, get_md5_digest_length());
|
||||
PK11_DigestFinal(md5ctx, result->data(), &md5outlen, result->size());
|
||||
PK11_DestroyContext(md5ctx, PR_TRUE);
|
||||
|
||||
return result;
|
||||
return false;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for SHA256
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_sha256_digest_length()
|
||||
bool s3fs_sha256(const unsigned char* data, size_t datalen, sha256_t* digest)
|
||||
{
|
||||
return SHA256_LENGTH;
|
||||
}
|
||||
|
||||
bool s3fs_sha256(const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
*digest = new unsigned char[*digestlen];
|
||||
|
||||
PK11Context* sha256ctx;
|
||||
unsigned int sha256outlen;
|
||||
sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);
|
||||
|
||||
PK11_DigestOp(sha256ctx, data, datalen);
|
||||
PK11_DigestFinal(sha256ctx, *digest, &sha256outlen, *digestlen);
|
||||
PK11_DigestFinal(sha256ctx, digest->data(), &sha256outlen, digest->size());
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
*digestlen = sha256outlen;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
|
||||
bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
|
||||
{
|
||||
PK11Context* sha256ctx;
|
||||
off_t bytes;
|
||||
unsigned char* result;
|
||||
unsigned int sha256outlen;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
return false;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
@ -244,15 +241,14 @@ unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
return NULL;
|
||||
return false;
|
||||
}
|
||||
PK11_DigestOp(sha256ctx, buf, bytes);
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
PK11_DigestFinal(sha256ctx, result, &sha256outlen, get_sha256_digest_length());
|
||||
PK11_DigestFinal(sha256ctx, result->data(), &sha256outlen, result->size());
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
|
||||
return result;
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@ -18,17 +18,16 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifdef __clang__
|
||||
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
|
||||
#endif
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cerrno>
|
||||
#include <pthread.h>
|
||||
#include <unistd.h>
|
||||
#include <syslog.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <cstring>
|
||||
#include <openssl/bio.h>
|
||||
#include <openssl/buffer.h>
|
||||
#include <openssl/evp.h>
|
||||
#include <openssl/hmac.h>
|
||||
#include <openssl/md5.h>
|
||||
@ -36,18 +35,16 @@
|
||||
#include <openssl/crypto.h>
|
||||
#include <openssl/err.h>
|
||||
#include <string>
|
||||
#include <map>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_auth.h"
|
||||
#include "s3fs_logger.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for version
|
||||
//-------------------------------------------------------------------
|
||||
const char* s3fs_crypt_lib_name()
|
||||
{
|
||||
static const char version[] = "OpenSSL";
|
||||
static constexpr char version[] = "OpenSSL";
|
||||
|
||||
return version;
|
||||
}
|
||||
@ -58,7 +55,14 @@ const char* s3fs_crypt_lib_name()
|
||||
bool s3fs_init_global_ssl()
|
||||
{
|
||||
ERR_load_crypto_strings();
|
||||
|
||||
// [NOTE]
|
||||
// OpenSSL 3.0 loads error strings automatically so these functions are not needed.
|
||||
//
|
||||
#ifndef USE_OPENSSL_30
|
||||
ERR_load_BIO_strings();
|
||||
#endif
|
||||
|
||||
OpenSSL_add_all_algorithms();
|
||||
return true;
|
||||
}
|
||||
@ -79,7 +83,7 @@ struct CRYPTO_dynlock_value
|
||||
pthread_mutex_t dyn_mutex;
|
||||
};
|
||||
|
||||
static pthread_mutex_t* s3fs_crypt_mutex = NULL;
|
||||
static pthread_mutex_t* s3fs_crypt_mutex = nullptr;
|
||||
|
||||
static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line) __attribute__ ((unused));
|
||||
static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line)
|
||||
@ -120,7 +124,7 @@ static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int l
|
||||
int result;
|
||||
if(0 != (result = pthread_mutex_init(&(dyndata->dyn_mutex), &attr))){
|
||||
S3FS_PRN_CRIT("pthread_mutex_init returned: %d", result);
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
return dyndata;
|
||||
}
|
||||
@ -160,7 +164,10 @@ static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, c
|
||||
bool s3fs_init_crypt_mutex()
|
||||
{
|
||||
if(s3fs_crypt_mutex){
|
||||
S3FS_PRN_DBG("s3fs_crypt_mutex is not NULL, destroy it.");
|
||||
S3FS_PRN_DBG("s3fs_crypt_mutex is not nullptr, destroy it.");
|
||||
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(!s3fs_destroy_crypt_mutex()){
|
||||
S3FS_PRN_ERR("Failed to s3fs_crypt_mutex");
|
||||
return false;
|
||||
@ -196,11 +203,11 @@ bool s3fs_destroy_crypt_mutex()
|
||||
return true;
|
||||
}
|
||||
|
||||
CRYPTO_set_dynlock_destroy_callback(NULL);
|
||||
CRYPTO_set_dynlock_lock_callback(NULL);
|
||||
CRYPTO_set_dynlock_create_callback(NULL);
|
||||
CRYPTO_set_id_callback(NULL);
|
||||
CRYPTO_set_locking_callback(NULL);
|
||||
CRYPTO_set_dynlock_destroy_callback(nullptr);
|
||||
CRYPTO_set_dynlock_lock_callback(nullptr);
|
||||
CRYPTO_set_dynlock_create_callback(nullptr);
|
||||
CRYPTO_set_id_callback(nullptr);
|
||||
CRYPTO_set_locking_callback(nullptr);
|
||||
|
||||
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
|
||||
int result = pthread_mutex_destroy(&s3fs_crypt_mutex[cnt]);
|
||||
@ -211,7 +218,7 @@ bool s3fs_destroy_crypt_mutex()
|
||||
}
|
||||
CRYPTO_cleanup_all_ex_data();
|
||||
delete[] s3fs_crypt_mutex;
|
||||
s3fs_crypt_mutex = NULL;
|
||||
s3fs_crypt_mutex = nullptr;
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -219,50 +226,127 @@ bool s3fs_destroy_crypt_mutex()
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for HMAC
|
||||
//-------------------------------------------------------------------
|
||||
static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256)
|
||||
static std::unique_ptr<unsigned char[]> s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen, bool is_sha256)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
if(!key || !data || !digestlen){
|
||||
return nullptr;
|
||||
}
|
||||
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
|
||||
*digest = new unsigned char[*digestlen];
|
||||
std::unique_ptr<unsigned char[]> digest(new unsigned char[*digestlen]);
|
||||
if(is_sha256){
|
||||
HMAC(EVP_sha256(), key, static_cast<int>(keylen), data, datalen, *digest, digestlen);
|
||||
HMAC(EVP_sha256(), key, static_cast<int>(keylen), data, datalen, digest.get(), digestlen);
|
||||
}else{
|
||||
HMAC(EVP_sha1(), key, static_cast<int>(keylen), data, datalen, *digest, digestlen);
|
||||
HMAC(EVP_sha1(), key, static_cast<int>(keylen), data, datalen, digest.get(), digestlen);
|
||||
}
|
||||
|
||||
return digest;
|
||||
}
|
||||
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
|
||||
{
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digestlen, false);
|
||||
}
|
||||
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
|
||||
{
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digestlen, true);
|
||||
}
|
||||
|
||||
#ifdef USE_OPENSSL_30
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5 (OpenSSL >= 3.0)
|
||||
//-------------------------------------------------------------------
|
||||
// [NOTE]
|
||||
// OpenSSL 3.0 deprecated the MD5_*** low-level encryption functions,
|
||||
// so we should use the high-level EVP API instead.
|
||||
//
|
||||
|
||||
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* digest)
|
||||
{
|
||||
unsigned int digestlen = static_cast<unsigned int>(digest->size());
|
||||
|
||||
const EVP_MD* md = EVP_get_digestbyname("md5");
|
||||
EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
|
||||
EVP_DigestInit_ex(mdctx, md, nullptr);
|
||||
EVP_DigestUpdate(mdctx, data, datalen);
|
||||
EVP_DigestFinal_ex(mdctx, digest->data(), &digestlen);
|
||||
EVP_MD_CTX_destroy(mdctx);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
|
||||
{
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false);
|
||||
}
|
||||
|
||||
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true);
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_md5_digest_length()
|
||||
{
|
||||
return MD5_DIGEST_LENGTH;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
|
||||
{
|
||||
MD5_CTX md5ctx;
|
||||
off_t bytes;
|
||||
unsigned char* result;
|
||||
EVP_MD_CTX* mdctx;
|
||||
unsigned int md5_digest_len = static_cast<unsigned int>(result->size());
|
||||
off_t bytes;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
return false;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
|
||||
// instead of MD5_Init
|
||||
mdctx = EVP_MD_CTX_new();
|
||||
EVP_DigestInit_ex(mdctx, EVP_md5(), nullptr);
|
||||
|
||||
for(off_t total = 0; total < size; total += bytes){
|
||||
const off_t len = 512;
|
||||
char buf[len];
|
||||
bytes = len < (size - total) ? len : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
EVP_MD_CTX_free(mdctx);
|
||||
return false;
|
||||
}
|
||||
// instead of MD5_Update
|
||||
EVP_DigestUpdate(mdctx, buf, bytes);
|
||||
}
|
||||
|
||||
// instead of MD5_Final
|
||||
EVP_DigestFinal_ex(mdctx, result->data(), &md5_digest_len);
|
||||
EVP_MD_CTX_free(mdctx);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#else
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5 (OpenSSL < 3.0)
|
||||
//-------------------------------------------------------------------
|
||||
|
||||
// TODO: Does this fail on OpenSSL < 3.0 and we need to use MD5_CTX functions?
|
||||
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* digest)
|
||||
{
|
||||
unsigned int digestlen = digest->size();
|
||||
|
||||
const EVP_MD* md = EVP_get_digestbyname("md5");
|
||||
EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
|
||||
EVP_DigestInit_ex(mdctx, md, nullptr);
|
||||
EVP_DigestUpdate(mdctx, data, datalen);
|
||||
EVP_DigestFinal_ex(mdctx, digest->data(), &digestlen);
|
||||
EVP_MD_CTX_destroy(mdctx);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
|
||||
{
|
||||
MD5_CTX md5ctx;
|
||||
off_t bytes;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return false;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
@ -280,61 +364,53 @@ unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return NULL;
|
||||
return false;
|
||||
}
|
||||
MD5_Update(&md5ctx, buf, bytes);
|
||||
}
|
||||
|
||||
result = new unsigned char[get_md5_digest_length()];
|
||||
MD5_Final(result, &md5ctx);
|
||||
MD5_Final(result->data(), &md5ctx);
|
||||
|
||||
return result;
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for SHA256
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_sha256_digest_length()
|
||||
bool s3fs_sha256(const unsigned char* data, size_t datalen, sha256_t* digest)
|
||||
{
|
||||
return SHA256_DIGEST_LENGTH;
|
||||
}
|
||||
|
||||
bool s3fs_sha256(const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
|
||||
*digest = new unsigned char[*digestlen];
|
||||
|
||||
const EVP_MD* md = EVP_get_digestbyname("sha256");
|
||||
EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
|
||||
EVP_DigestInit_ex(mdctx, md, NULL);
|
||||
EVP_DigestInit_ex(mdctx, md, nullptr);
|
||||
EVP_DigestUpdate(mdctx, data, datalen);
|
||||
EVP_DigestFinal_ex(mdctx, *digest, digestlen);
|
||||
unsigned int digestlen = static_cast<unsigned int>(digest->size());
|
||||
EVP_DigestFinal_ex(mdctx, digest->data(), &digestlen);
|
||||
EVP_MD_CTX_destroy(mdctx);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
|
||||
bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
|
||||
{
|
||||
const EVP_MD* md = EVP_get_digestbyname("sha256");
|
||||
EVP_MD_CTX* sha256ctx;
|
||||
off_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
if(-1 == fd){
|
||||
return NULL;
|
||||
return false;
|
||||
}
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
S3FS_PRN_ERR("fstat error(%d)", errno);
|
||||
return NULL;
|
||||
return false;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
|
||||
sha256ctx = EVP_MD_CTX_create();
|
||||
EVP_DigestInit_ex(sha256ctx, md, NULL);
|
||||
EVP_DigestInit_ex(sha256ctx, md, nullptr);
|
||||
|
||||
for(off_t total = 0; total < size; total += bytes){
|
||||
const off_t len = 512;
|
||||
@ -348,15 +424,14 @@ unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
EVP_MD_CTX_destroy(sha256ctx);
|
||||
return NULL;
|
||||
return false;
|
||||
}
|
||||
EVP_DigestUpdate(sha256ctx, buf, bytes);
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
EVP_DigestFinal_ex(sha256ctx, result, NULL);
|
||||
EVP_DigestFinal_ex(sha256ctx, result->data(), nullptr);
|
||||
EVP_MD_CTX_destroy(sha256ctx);
|
||||
|
||||
return result;
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@ -41,7 +41,20 @@ class Semaphore
            }
            dispatch_release(sem);
        }
        Semaphore(const Semaphore&) = delete;
        Semaphore(Semaphore&&) = delete;
        Semaphore& operator=(const Semaphore&) = delete;
        Semaphore& operator=(Semaphore&&) = delete;

        void wait() { dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER); }
        bool try_wait()
        {
            if(0 == dispatch_semaphore_wait(sem, DISPATCH_TIME_NOW)){
                return true;
            }else{
                return false;
            }
        }
        void post() { dispatch_semaphore_signal(sem); }
        int get_value() const { return value; }

@ -67,6 +80,15 @@ class Semaphore
                r = sem_wait(&mutex);
            } while (r == -1 && errno == EINTR);
        }
        bool try_wait()
        {
            int result;
            do{
                result = sem_trywait(&mutex);
            }while(result == -1 && errno == EINTR);

            return (0 == result);
        }
        void post() { sem_post(&mutex); }
        int get_value() const { return value; }

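The new try_wait() returns true only when the semaphore could be decremented without blocking. A small illustrative sketch of how a caller might use it; it is written as a template so it compiles standalone, and only the try_wait()/wait()/post() methods shown above are assumed.

    // Illustrative only; SemType is assumed to expose try_wait() like the
    // Semaphore class in the diff above.
    template <typename SemType>
    int drain_available_slots(SemType& sem)
    {
        int handled = 0;
        // Consume already-posted slots without blocking; stop at the first
        // try_wait() that would otherwise have had to sleep.
        while(sem.try_wait()){
            ++handled;
        }
        // A blocking wait() would be used instead when the caller must sleep
        // until the next post().
        return handled;
    }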
2648
src/s3fs.cpp
File diff suppressed because it is too large
@ -21,9 +21,14 @@
|
||||
#ifndef S3FS_AUTH_H_
|
||||
#define S3FS_AUTH_H_
|
||||
|
||||
#include <array>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <sys/types.h>
|
||||
|
||||
typedef std::array<unsigned char, 16> md5_t;
|
||||
typedef std::array<unsigned char, 32> sha256_t;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility functions for Authentication
|
||||
//-------------------------------------------------------------------
|
||||
@ -41,13 +46,12 @@ bool s3fs_init_global_ssl();
|
||||
bool s3fs_destroy_global_ssl();
|
||||
bool s3fs_init_crypt_mutex();
|
||||
bool s3fs_destroy_crypt_mutex();
|
||||
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen);
|
||||
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen);
|
||||
size_t get_md5_digest_length();
|
||||
unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size);
|
||||
bool s3fs_sha256(const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen);
|
||||
size_t get_sha256_digest_length();
|
||||
unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size);
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen);
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen);
|
||||
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* result);
|
||||
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result);
|
||||
bool s3fs_sha256(const unsigned char* data, size_t datalen, sha256_t* digest);
|
||||
bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result);
|
||||
|
||||
#endif // S3FS_AUTH_H_
|
||||
|
||||
|
||||
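In the updated header the HMAC helpers return the digest as std::unique_ptr<unsigned char[]> instead of writing through an unsigned char** out-parameter, and the MD5/SHA-256 helpers fill fixed-size md5_t/sha256_t arrays. A hedged sketch of a caller of the new s3fs_HMAC256() signature; sign_raw() is illustrative, and the assumption that a null return signals failure is not taken from the source.

// Illustrative caller; ownership of the digest buffer is automatic.
#include <memory>
#include <string>
#include "s3fs_auth.h"

static std::string sign_raw(const std::string& key, const std::string& msg)
{
    unsigned int digestlen = 0;
    std::unique_ptr<unsigned char[]> digest = s3fs_HMAC256(
        key.data(), key.size(),
        reinterpret_cast<const unsigned char*>(msg.data()), msg.size(),
        &digestlen);
    if(!digest){               // assumption: null means the HMAC could not be computed
        return "";
    }
    // The buffer is released automatically when 'digest' goes out of scope.
    return std::string(reinterpret_cast<const char*>(digest.get()), digestlen);
}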
@ -18,15 +18,19 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cerrno>
|
||||
#include <unistd.h>
|
||||
#include <pwd.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <dlfcn.h>
|
||||
#include <fstream>
|
||||
#include <sstream>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_cred.h"
|
||||
#include "s3fs_help.h"
|
||||
#include "s3fs_logger.h"
|
||||
#include "curl.h"
|
||||
#include "string_util.h"
|
||||
#include "metaheader.h"
|
||||
@ -34,26 +38,94 @@
|
||||
//-------------------------------------------------------------------
|
||||
// Symbols
|
||||
//-------------------------------------------------------------------
|
||||
#define DEFAULT_AWS_PROFILE_NAME "default"
|
||||
static constexpr char DEFAULT_AWS_PROFILE_NAME[] = "default";
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// External Credential dummy function
|
||||
//-------------------------------------------------------------------
|
||||
// [NOTE]
|
||||
// This function expects the following values:
|
||||
//
|
||||
// detail=false ex. "Custom AWS Credential Library - v1.0.0"
|
||||
// detail=true ex. "Custom AWS Credential Library - v1.0.0
|
||||
// s3fs-fuse credential I/F library for S3 compatible storage X.
|
||||
// Copyright(C) 2022 Foo"
|
||||
//
|
||||
const char* VersionS3fsCredential(bool detail)
|
||||
{
|
||||
static constexpr char version[] = "built-in";
|
||||
static constexpr char detail_version[] =
|
||||
"s3fs-fuse built-in Credential I/F Function\n"
|
||||
"Copyright(C) 2007 s3fs-fuse\n";
|
||||
|
||||
if(detail){
|
||||
return detail_version;
|
||||
}else{
|
||||
return version;
|
||||
}
|
||||
}
|
||||
|
||||
bool InitS3fsCredential(const char* popts, char** pperrstr)
|
||||
{
|
||||
if(popts && 0 < strlen(popts)){
|
||||
S3FS_PRN_WARN("The external credential library does not have InitS3fsCredential function, but credlib_opts value is not empty(%s)", popts);
|
||||
}
|
||||
if(pperrstr){
|
||||
*pperrstr = strdup("The external credential library does not have InitS3fsCredential function, so built-in function was called.");
|
||||
}else{
|
||||
S3FS_PRN_INFO("The external credential library does not have InitS3fsCredential function, so built-in function was called.");
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool FreeS3fsCredential(char** pperrstr)
|
||||
{
|
||||
if(pperrstr){
|
||||
*pperrstr = strdup("The external credential library does not have FreeS3fsCredential function, so built-in function was called.");
|
||||
}else{
|
||||
S3FS_PRN_INFO("The external credential library does not have FreeS3fsCredential function, so built-in function was called.");
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool UpdateS3fsCredential(char** ppaccess_key_id, char** ppserect_access_key, char** ppaccess_token, long long* ptoken_expire, char** pperrstr)
|
||||
{
|
||||
S3FS_PRN_INFO("Parameters : ppaccess_key_id=%p, ppserect_access_key=%p, ppaccess_token=%p, ptoken_expire=%p", ppaccess_key_id, ppserect_access_key, ppaccess_token, ptoken_expire);
|
||||
|
||||
if(pperrstr){
|
||||
*pperrstr = strdup("Check why built-in function was called, the external credential library must have UpdateS3fsCredential function.");
|
||||
}else{
|
||||
S3FS_PRN_CRIT("Check why built-in function was called, the external credential library must have UpdateS3fsCredential function.");
|
||||
}
|
||||
|
||||
if(ppaccess_key_id){
|
||||
*ppaccess_key_id = nullptr;
|
||||
}
|
||||
if(ppserect_access_key){
|
||||
*ppserect_access_key = nullptr;
|
||||
}
|
||||
if(ppaccess_token){
|
||||
*ppaccess_token = nullptr;
|
||||
}
|
||||
return false; // always false
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class Variables
|
||||
//-------------------------------------------------------------------
|
||||
const char* S3fsCred::ALLBUCKET_FIELDS_TYPE = "";
|
||||
const char* S3fsCred::KEYVAL_FIELDS_TYPE = "\t";
|
||||
const char* S3fsCred::AWS_ACCESSKEYID = "AWSAccessKeyId";
|
||||
const char* S3fsCred::AWS_SECRETKEY = "AWSSecretKey";
|
||||
constexpr char S3fsCred::ALLBUCKET_FIELDS_TYPE[];
|
||||
constexpr char S3fsCred::KEYVAL_FIELDS_TYPE[];
|
||||
constexpr char S3fsCred::AWS_ACCESSKEYID[];
|
||||
constexpr char S3fsCred::AWS_SECRETKEY[];
|
||||
|
||||
const int S3fsCred::IAM_EXPIRE_MERGIN = 20 * 60; // update timing
|
||||
const char* S3fsCred::ECS_IAM_ENV_VAR = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI";
|
||||
const char* S3fsCred::IAMCRED_ACCESSKEYID = "AccessKeyId";
|
||||
const char* S3fsCred::IAMCRED_SECRETACCESSKEY = "SecretAccessKey";
|
||||
const char* S3fsCred::IAMCRED_ROLEARN = "RoleArn";
|
||||
constexpr char S3fsCred::ECS_IAM_ENV_VAR[];
|
||||
constexpr char S3fsCred::IAMCRED_ACCESSKEYID[];
|
||||
constexpr char S3fsCred::IAMCRED_SECRETACCESSKEY[];
|
||||
constexpr char S3fsCred::IAMCRED_ROLEARN[];
|
||||
|
||||
const char* S3fsCred::IAMv2_token_url = "http://169.254.169.254/latest/api/token";
|
||||
int S3fsCred::IAMv2_token_ttl = 21600;
|
||||
const char* S3fsCred::IAMv2_token_ttl_hdr = "X-aws-ec2-metadata-token-ttl-seconds";
|
||||
const char* S3fsCred::IAMv2_token_hdr = "X-aws-ec2-metadata-token";
|
||||
constexpr char S3fsCred::IAMv2_token_url[];
|
||||
constexpr char S3fsCred::IAMv2_token_ttl_hdr[];
|
||||
constexpr char S3fsCred::IAMv2_token_hdr[];
|
||||
|
||||
std::string S3fsCred::bucket_name;
|
||||
|
||||
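The string constants above move from static const char* members initialized in this file to in-class static constexpr char arrays in s3fs_cred.h, leaving only empty out-of-line definitions such as "constexpr char S3fsCred::ECS_IAM_ENV_VAR[];" here. That extra line is what C++11/14 requires once the member is odr-used (for example when the array decays to a pointer passed to getenv()); under C++17 the in-class declaration is already an inline definition and the .cpp line becomes redundant but stays harmless, presumably kept for pre-C++17 builds. A standalone illustration of the pattern, not s3fs code:

// Standalone example; Config, kEnvVar and kTtl are made-up names.
#include <cstdio>

struct Config
{
    // In-class initialization: usable in constant expressions, no startup cost.
    static constexpr char kEnvVar[] = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI";
    static constexpr int  kTtl      = 21600;
};

// Required in exactly one .cpp when building as C++11/14 and kEnvVar is
// odr-used (its address taken); in C++17 this line is redundant but allowed.
constexpr char Config::kEnvVar[];

int main()
{
    printf("%s (token ttl %d seconds)\n", Config::kEnvVar, Config::kTtl);
    return 0;
}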
@ -71,7 +143,7 @@ bool S3fsCred::SetBucket(const char* bucket)
|
||||
|
||||
const std::string& S3fsCred::GetBucket()
|
||||
{
|
||||
return S3fsCred::bucket_name;
|
||||
return S3fsCred::bucket_name;
|
||||
}
|
||||
|
||||
bool S3fsCred::ParseIAMRoleFromMetaDataResponse(const char* response, std::string& rolename)
|
||||
@ -98,23 +170,23 @@ bool S3fsCred::ParseIAMRoleFromMetaDataResponse(const char* response, std::strin
|
||||
//-------------------------------------------------------------------
|
||||
S3fsCred::S3fsCred() :
|
||||
is_lock_init(false),
|
||||
passwd_file(""),
|
||||
aws_profile(DEFAULT_AWS_PROFILE_NAME),
|
||||
load_iamrole(false),
|
||||
AWSAccessKeyId(""),
|
||||
AWSSecretAccessKey(""),
|
||||
AWSAccessToken(""),
|
||||
AWSAccessTokenExpire(0),
|
||||
is_ecs(false),
|
||||
is_use_session_token(false),
|
||||
is_ibm_iam_auth(false),
|
||||
IAM_cred_url("http://169.254.169.254/latest/meta-data/iam/security-credentials/"),
|
||||
IAM_api_version(2),
|
||||
IAMv2_api_token(""),
|
||||
IAM_field_count(4),
|
||||
IAM_token_field("Token"),
|
||||
IAM_expiry_field("Expiration"),
|
||||
IAM_role("")
|
||||
set_builtin_cred_opts(false),
|
||||
hExtCredLib(nullptr),
|
||||
pFuncCredVersion(VersionS3fsCredential),
|
||||
pFuncCredInit(InitS3fsCredential),
|
||||
pFuncCredFree(FreeS3fsCredential),
|
||||
pFuncCredUpdate(UpdateS3fsCredential)
|
||||
{
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
@ -131,6 +203,8 @@ S3fsCred::S3fsCred() :
|
||||
|
||||
S3fsCred::~S3fsCred()
|
||||
{
|
||||
UnloadExtCredLib();
|
||||
|
||||
if(is_lock_init){
|
||||
int result;
|
||||
if(0 != (result = pthread_mutex_destroy(&token_lock))){
|
||||
@ -154,7 +228,7 @@ bool S3fsCred::SetS3fsPasswdFile(const char* file)
|
||||
return true;
|
||||
}
|
||||
|
||||
bool S3fsCred::IsSetPasswdFile()
|
||||
bool S3fsCred::IsSetPasswdFile() const
|
||||
{
|
||||
return !passwd_file.empty();
|
||||
}
|
||||
@ -208,7 +282,7 @@ bool S3fsCred::SetAccessKeyWithSessionToken(const char* AccessKeyId, const char*
|
||||
return true;
|
||||
}
|
||||
|
||||
bool S3fsCred::IsSetAccessKeys(AutoLock::Type type)
|
||||
bool S3fsCred::IsSetAccessKeys(AutoLock::Type type) const
|
||||
{
|
||||
AutoLock auto_lock(&token_lock, type);
|
||||
|
||||
@ -244,14 +318,14 @@ bool S3fsCred::SetIAMRole(const char* role, AutoLock::Type type)
|
||||
return true;
|
||||
}
|
||||
|
||||
std::string S3fsCred::GetIAMRole(AutoLock::Type type)
|
||||
std::string S3fsCred::GetIAMRole(AutoLock::Type type) const
|
||||
{
|
||||
AutoLock auto_lock(&token_lock, type);
|
||||
|
||||
return IAM_role;
|
||||
}
|
||||
|
||||
bool S3fsCred::IsSetIAMRole(AutoLock::Type type)
|
||||
bool S3fsCred::IsSetIAMRole(AutoLock::Type type) const
|
||||
{
|
||||
AutoLock auto_lock(&token_lock, type);
|
||||
|
||||
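Several accessors in this file (IsSetPasswdFile, IsSetAccessKeys, GetIMDSVersion, GetIAMv2APIToken, GetIAMRole, IsSetIAMRole, IsReadableS3fsPasswdFile) become const while still taking AutoLock on token_lock; that only compiles because the header change further down declares token_lock as a mutable member. A minimal standalone illustration of the pattern, using std::mutex and std::lock_guard instead of pthread_mutex_t and AutoLock:

// Not s3fs code; RoleHolder is a made-up class showing a mutable lock in const getters.
#include <mutex>
#include <string>

class RoleHolder
{
    private:
        mutable std::mutex lock;   // may be locked from const member functions
        std::string        role;

    public:
        void Set(const std::string& r)
        {
            std::lock_guard<std::mutex> guard(lock);
            role = r;
        }
        std::string Get() const
        {
            // Locking modifies the mutex, which is allowed because it is mutable.
            std::lock_guard<std::mutex> guard(lock);
            return role;
        }
};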
@ -299,7 +373,7 @@ bool S3fsCred::GetIAMCredentialsURL(std::string& url, bool check_iam_role, AutoL
|
||||
|
||||
if(is_ecs){
|
||||
const char *env = std::getenv(S3fsCred::ECS_IAM_ENV_VAR);
|
||||
if(env == NULL){
|
||||
if(env == nullptr){
|
||||
S3FS_PRN_ERR("%s is not set.", S3fsCred::ECS_IAM_ENV_VAR);
|
||||
return false;
|
||||
}
|
||||
@ -358,7 +432,7 @@ int S3fsCred::SetIMDSVersion(int version, AutoLock::Type type)
|
||||
return old;
|
||||
}
|
||||
|
||||
int S3fsCred::GetIMDSVersion(AutoLock::Type type)
|
||||
int S3fsCred::GetIMDSVersion(AutoLock::Type type) const
|
||||
{
|
||||
AutoLock auto_lock(&token_lock, type);
|
||||
|
||||
@ -378,7 +452,7 @@ bool S3fsCred::SetIAMv2APIToken(const std::string& token, AutoLock::Type type)
|
||||
return true;
|
||||
}
|
||||
|
||||
std::string S3fsCred::GetIAMv2APIToken(AutoLock::Type type)
|
||||
std::string S3fsCred::GetIAMv2APIToken(AutoLock::Type type) const
|
||||
{
|
||||
AutoLock auto_lock(&token_lock, type);
|
||||
|
||||
@ -404,14 +478,14 @@ bool S3fsCred::LoadIAMCredentials(AutoLock::Type type)
|
||||
return false;
|
||||
}
|
||||
|
||||
const char* iam_v2_token = NULL;
|
||||
const char* iam_v2_token = nullptr;
|
||||
std::string str_iam_v2_token;
|
||||
if(GetIMDSVersion(AutoLock::ALREADY_LOCKED) > 1){
|
||||
str_iam_v2_token = GetIAMv2APIToken(AutoLock::ALREADY_LOCKED);
|
||||
iam_v2_token = str_iam_v2_token.c_str();
|
||||
}
|
||||
|
||||
const char* ibm_secret_access_key = NULL;
|
||||
const char* ibm_secret_access_key = nullptr;
|
||||
std::string str_ibm_secret_access_key;
|
||||
if(IsIBMIAMAuth()){
|
||||
str_ibm_secret_access_key = AWSSecretAccessKey;
|
||||
@ -428,7 +502,7 @@ bool S3fsCred::LoadIAMCredentials(AutoLock::Type type)
|
||||
S3FS_PRN_ERR("Something error occurred, could not set IAM role name.");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
@ -446,7 +520,7 @@ bool S3fsCred::LoadIAMRoleFromMetaData()
|
||||
return false;
|
||||
}
|
||||
|
||||
const char* iam_v2_token = NULL;
|
||||
const char* iam_v2_token = nullptr;
|
||||
std::string str_iam_v2_token;
|
||||
if(GetIMDSVersion(AutoLock::ALREADY_LOCKED) > 1){
|
||||
str_iam_v2_token = GetIAMv2APIToken(AutoLock::ALREADY_LOCKED);
|
||||
@ -493,8 +567,8 @@ bool S3fsCred::SetIAMCredentials(const char* response, AutoLock::Type type)
|
||||
}
|
||||
AWSAccessTokenExpire = static_cast<time_t>(tmp_expire);
|
||||
}else{
|
||||
AWSAccessKeyId = keyval[std::string(S3fsCred::IAMCRED_ACCESSKEYID)];
|
||||
AWSSecretAccessKey = keyval[std::string(S3fsCred::IAMCRED_SECRETACCESSKEY)];
|
||||
AWSAccessKeyId = keyval[S3fsCred::IAMCRED_ACCESSKEYID];
|
||||
AWSSecretAccessKey = keyval[S3fsCred::IAMCRED_SECRETACCESSKEY];
|
||||
AWSAccessTokenExpire = cvtIAMExpireStringToTime(keyval[IAM_expiry_field].c_str());
|
||||
}
|
||||
return true;
|
||||
@ -519,7 +593,7 @@ bool S3fsCred::SetIAMRoleFromMetaData(const char* response, AutoLock::Type type)
|
||||
//
|
||||
// Check passwd file readable
|
||||
//
|
||||
bool S3fsCred::IsReadableS3fsPasswdFile()
|
||||
bool S3fsCred::IsReadableS3fsPasswdFile() const
|
||||
{
|
||||
if(passwd_file.empty()){
|
||||
return false;
|
||||
@ -551,11 +625,11 @@ bool S3fsCred::CheckS3fsPasswdFilePerms()
|
||||
|
||||
// let's get the file info
|
||||
if(stat(passwd_file.c_str(), &info) != 0){
|
||||
S3FS_PRN_EXIT("unexpected error from stat(%s).", passwd_file.c_str());
|
||||
S3FS_PRN_EXIT("unexpected error from stat(%s): %s", passwd_file.c_str(), strerror(errno));
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check readable
|
||||
// Check readable
|
||||
if(!IsReadableS3fsPasswdFile()){
|
||||
S3FS_PRN_EXIT("S3fs passwd file \"%s\" is not readable.", passwd_file.c_str());
|
||||
return false;
|
||||
@ -902,7 +976,7 @@ bool S3fsCred::InitialS3fsCredentials()
|
||||
}
|
||||
|
||||
// access key loading is deferred
|
||||
if(load_iamrole || is_ecs){
|
||||
if(load_iamrole || IsSetExtCredLib() || is_ecs){
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -920,18 +994,18 @@ bool S3fsCred::InitialS3fsCredentials()
|
||||
}
|
||||
|
||||
// 3 - environment variables
|
||||
char* AWSACCESSKEYID = getenv("AWS_ACCESS_KEY_ID") ? getenv("AWS_ACCESS_KEY_ID") : getenv("AWSACCESSKEYID");
|
||||
char* AWSSECRETACCESSKEY = getenv("AWS_SECRET_ACCESS_KEY") ? getenv("AWS_SECRET_ACCESS_KEY") : getenv("AWSSECRETACCESSKEY");
|
||||
char* AWSSESSIONTOKEN = getenv("AWS_SESSION_TOKEN") ? getenv("AWS_SESSION_TOKEN") : getenv("AWSSESSIONTOKEN");
|
||||
const char* AWSACCESSKEYID = getenv("AWS_ACCESS_KEY_ID") ? getenv("AWS_ACCESS_KEY_ID") : getenv("AWSACCESSKEYID");
|
||||
const char* AWSSECRETACCESSKEY = getenv("AWS_SECRET_ACCESS_KEY") ? getenv("AWS_SECRET_ACCESS_KEY") : getenv("AWSSECRETACCESSKEY");
|
||||
const char* AWSSESSIONTOKEN = getenv("AWS_SESSION_TOKEN") ? getenv("AWS_SESSION_TOKEN") : getenv("AWSSESSIONTOKEN");
|
||||
|
||||
if(AWSACCESSKEYID != NULL || AWSSECRETACCESSKEY != NULL){
|
||||
if( (AWSACCESSKEYID == NULL && AWSSECRETACCESSKEY != NULL) ||
|
||||
(AWSACCESSKEYID != NULL && AWSSECRETACCESSKEY == NULL) ){
|
||||
if(AWSACCESSKEYID != nullptr || AWSSECRETACCESSKEY != nullptr){
|
||||
if( (AWSACCESSKEYID == nullptr && AWSSECRETACCESSKEY != nullptr) ||
|
||||
(AWSACCESSKEYID != nullptr && AWSSECRETACCESSKEY == nullptr) ){
|
||||
S3FS_PRN_EXIT("both environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY must be set together.");
|
||||
return false;
|
||||
}
|
||||
S3FS_PRN_INFO2("access key from env variables");
|
||||
if(AWSSESSIONTOKEN != NULL){
|
||||
if(AWSSESSIONTOKEN != nullptr){
|
||||
S3FS_PRN_INFO2("session token is available");
|
||||
if(!SetAccessKeyWithSessionToken(AWSACCESSKEYID, AWSSECRETACCESSKEY, AWSSESSIONTOKEN, AutoLock::NONE)){
|
||||
S3FS_PRN_EXIT("session token is invalid.");
|
||||
@ -953,7 +1027,7 @@ bool S3fsCred::InitialS3fsCredentials()
|
||||
|
||||
// 3a - from the AWS_CREDENTIAL_FILE environment variable
|
||||
char* AWS_CREDENTIAL_FILE = getenv("AWS_CREDENTIAL_FILE");
|
||||
if(AWS_CREDENTIAL_FILE != NULL){
|
||||
if(AWS_CREDENTIAL_FILE != nullptr){
|
||||
passwd_file = AWS_CREDENTIAL_FILE;
|
||||
if(IsSetPasswdFile()){
|
||||
if(!IsReadableS3fsPasswdFile()){
|
||||
@ -978,7 +1052,7 @@ bool S3fsCred::InitialS3fsCredentials()
|
||||
|
||||
// 4 - from the default location in the users home directory
|
||||
char* HOME = getenv("HOME");
|
||||
if(HOME != NULL){
|
||||
if(HOME != nullptr){
|
||||
passwd_file = HOME;
|
||||
passwd_file += "/.passwd-s3fs";
|
||||
if(IsReadableS3fsPasswdFile()){
|
||||
@ -1070,14 +1144,21 @@ bool S3fsCred::CheckIAMCredentialUpdate(std::string* access_key_id, std::string*
|
||||
{
|
||||
AutoLock auto_lock(&token_lock);
|
||||
|
||||
if(IsSetIAMRole(AutoLock::ALREADY_LOCKED) || is_ecs || is_ibm_iam_auth){
|
||||
if(AWSAccessTokenExpire < (time(NULL) + S3fsCred::IAM_EXPIRE_MERGIN)){
|
||||
if(IsIBMIAMAuth() || IsSetExtCredLib() || is_ecs || IsSetIAMRole(AutoLock::ALREADY_LOCKED)){
|
||||
if(AWSAccessTokenExpire < (time(nullptr) + S3fsCred::IAM_EXPIRE_MERGIN)){
|
||||
S3FS_PRN_INFO("IAM Access Token refreshing...");
|
||||
|
||||
// update
|
||||
if(!LoadIAMCredentials(AutoLock::ALREADY_LOCKED)){
|
||||
S3FS_PRN_ERR("IAM Access Token refresh failed");
|
||||
return false;
|
||||
if(!IsSetExtCredLib()){
|
||||
if(!LoadIAMCredentials(AutoLock::ALREADY_LOCKED)){
|
||||
S3FS_PRN_ERR("Access Token refresh by built-in failed");
|
||||
return false;
|
||||
}
|
||||
}else{
|
||||
if(!UpdateExtCredentials(AutoLock::ALREADY_LOCKED)){
|
||||
S3FS_PRN_ERR("Access Token refresh by %s(external credential library) failed", credlib.c_str());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
S3FS_PRN_INFO("IAM Access Token refreshed");
|
||||
}
|
||||
@ -1091,7 +1172,7 @@ bool S3fsCred::CheckIAMCredentialUpdate(std::string* access_key_id, std::string*
|
||||
*secret_access_key = AWSSecretAccessKey;
|
||||
}
|
||||
if(access_token){
|
||||
if(IsIBMIAMAuth() || IsSetIAMRole(AutoLock::ALREADY_LOCKED) || is_ecs || is_use_session_token){
|
||||
if(IsIBMIAMAuth() || IsSetExtCredLib() || is_ecs || is_use_session_token || IsSetIAMRole(AutoLock::ALREADY_LOCKED)){
|
||||
*access_token = AWSAccessToken;
|
||||
}else{
|
||||
access_token->erase();
|
||||
@ -1101,6 +1182,217 @@ bool S3fsCred::CheckIAMCredentialUpdate(std::string* access_key_id, std::string*
|
||||
return true;
|
||||
}
|
||||
|
||||
const char* S3fsCred::GetCredFuncVersion(bool detail) const
|
||||
{
|
||||
static constexpr char errVersion[] = "unknown";
|
||||
|
||||
if(!pFuncCredVersion){
|
||||
return errVersion;
|
||||
}
|
||||
return (*pFuncCredVersion)(detail);
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Methods : External Credential Library
|
||||
//-------------------------------------------------------------------
|
||||
bool S3fsCred::SetExtCredLib(const char* arg)
|
||||
{
|
||||
if(!arg || strlen(arg) == 0){
|
||||
return false;
|
||||
}
|
||||
credlib = arg;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool S3fsCred::IsSetExtCredLib() const
|
||||
{
|
||||
return !credlib.empty();
|
||||
}
|
||||
|
||||
bool S3fsCred::SetExtCredLibOpts(const char* args)
|
||||
{
|
||||
if(!args || strlen(args) == 0){
|
||||
return false;
|
||||
}
|
||||
credlib_opts = args;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool S3fsCred::IsSetExtCredLibOpts() const
|
||||
{
|
||||
return !credlib_opts.empty();
|
||||
}
|
||||
|
||||
bool S3fsCred::InitExtCredLib()
|
||||
{
|
||||
if(!LoadExtCredLib()){
|
||||
return false;
|
||||
}
|
||||
// Initialize library
|
||||
if(!pFuncCredInit){
|
||||
S3FS_PRN_CRIT("\"InitS3fsCredential\" function pointer is nullptr, why?");
|
||||
UnloadExtCredLib();
|
||||
return false;
|
||||
}
|
||||
|
||||
const char* popts = credlib_opts.empty() ? nullptr : credlib_opts.c_str();
|
||||
char* perrstr = nullptr;
|
||||
if(!(*pFuncCredInit)(popts, &perrstr)){
|
||||
S3FS_PRN_ERR("Could not initialize %s(external credential library) by \"InitS3fsCredential\" function : %s", credlib.c_str(), perrstr ? perrstr : "unknown");
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(perrstr){
|
||||
free(perrstr);
|
||||
}
|
||||
UnloadExtCredLib();
|
||||
return false;
|
||||
}
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(perrstr){
|
||||
free(perrstr);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool S3fsCred::LoadExtCredLib()
|
||||
{
|
||||
if(credlib.empty()){
|
||||
return false;
|
||||
}
|
||||
UnloadExtCredLib();
|
||||
|
||||
S3FS_PRN_INFO("Load External Credential Library : %s", credlib.c_str());
|
||||
|
||||
// Open Library
|
||||
//
|
||||
// Search Library: (RPATH ->) LD_LIBRARY_PATH -> (RUNPATH ->) /etc/ld.so.cache -> /lib -> /usr/lib
|
||||
//
|
||||
if(nullptr == (hExtCredLib = dlopen(credlib.c_str(), RTLD_LAZY))){
|
||||
const char* preason = dlerror();
|
||||
S3FS_PRN_ERR("Could not load %s(external credential library) by error : %s", credlib.c_str(), preason ? preason : "unknown");
|
||||
return false;
|
||||
}
|
||||
|
||||
// Set function pointers
|
||||
if(nullptr == (pFuncCredVersion = reinterpret_cast<fp_VersionS3fsCredential>(dlsym(hExtCredLib, "VersionS3fsCredential")))){
|
||||
S3FS_PRN_ERR("%s(external credential library) does not have \"VersionS3fsCredential\" function which is required.", credlib.c_str());
|
||||
UnloadExtCredLib();
|
||||
return false;
|
||||
}
|
||||
if(nullptr == (pFuncCredUpdate = reinterpret_cast<fp_UpdateS3fsCredential>(dlsym(hExtCredLib, "UpdateS3fsCredential")))){
|
||||
S3FS_PRN_ERR("%s(external credential library) does not have \"UpdateS3fsCredential\" function which is required.", credlib.c_str());
|
||||
UnloadExtCredLib();
|
||||
return false;
|
||||
}
|
||||
if(nullptr == (pFuncCredInit = reinterpret_cast<fp_InitS3fsCredential>(dlsym(hExtCredLib, "InitS3fsCredential")))){
|
||||
S3FS_PRN_INFO("%s(external credential library) does not have \"InitS3fsCredential\" function which is optional.", credlib.c_str());
|
||||
pFuncCredInit = InitS3fsCredential; // set built-in function
|
||||
}
|
||||
if(nullptr == (pFuncCredFree = reinterpret_cast<fp_FreeS3fsCredential>(dlsym(hExtCredLib, "FreeS3fsCredential")))){
|
||||
S3FS_PRN_INFO("%s(external credential library) does not have \"FreeS3fsCredential\" function which is optional.", credlib.c_str());
|
||||
pFuncCredFree = FreeS3fsCredential; // set built-in function
|
||||
}
|
||||
S3FS_PRN_INFO("Succeed loading External Credential Library : %s", credlib.c_str());
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool S3fsCred::UnloadExtCredLib()
|
||||
{
|
||||
if(hExtCredLib){
|
||||
S3FS_PRN_INFO("Unload External Credential Library : %s", credlib.c_str());
|
||||
|
||||
// Uninitialize library
|
||||
if(!pFuncCredFree){
|
||||
S3FS_PRN_CRIT("\"FreeS3fsCredential\" function pointer is nullptr, why?");
|
||||
}else{
|
||||
char* perrstr = nullptr;
|
||||
if(!(*pFuncCredFree)(&perrstr)){
|
||||
S3FS_PRN_ERR("Could not uninitialize by \"FreeS3fsCredential\" function : %s", perrstr ? perrstr : "unknown");
|
||||
}
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(perrstr){
|
||||
free(perrstr);
|
||||
}
|
||||
}
|
||||
|
||||
// reset
|
||||
pFuncCredVersion = VersionS3fsCredential;
|
||||
pFuncCredInit = InitS3fsCredential;
|
||||
pFuncCredFree = FreeS3fsCredential;
|
||||
pFuncCredUpdate = UpdateS3fsCredential;
|
||||
|
||||
// close
|
||||
dlclose(hExtCredLib);
|
||||
hExtCredLib = nullptr;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool S3fsCred::UpdateExtCredentials(AutoLock::Type type)
|
||||
{
|
||||
if(!hExtCredLib){
|
||||
S3FS_PRN_CRIT("External Credential Library is not loaded, why?");
|
||||
return false;
|
||||
}
|
||||
|
||||
AutoLock auto_lock(&token_lock, type);
|
||||
|
||||
char* paccess_key_id = nullptr;
|
||||
char* pserect_access_key = nullptr;
|
||||
char* paccess_token = nullptr;
|
||||
char* perrstr = nullptr;
|
||||
long long token_expire = 0;
|
||||
|
||||
bool result = (*pFuncCredUpdate)(&paccess_key_id, &pserect_access_key, &paccess_token, &token_expire, &perrstr);
|
||||
if(!result){
|
||||
// error occurred
|
||||
S3FS_PRN_ERR("Could not update credential by \"UpdateS3fsCredential\" function : %s", perrstr ? perrstr : "unknown");
|
||||
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
}else if(!paccess_key_id || !pserect_access_key || !paccess_token || token_expire <= 0){
|
||||
// some variables are wrong
|
||||
S3FS_PRN_ERR("After updating credential by \"UpdateS3fsCredential\" function, but some variables are wrong : paccess_key_id=%p, pserect_access_key=%p, paccess_token=%p, token_expire=%lld", paccess_key_id, pserect_access_key, paccess_token, token_expire);
|
||||
result = false;
|
||||
}else{
|
||||
// succeed updating
|
||||
AWSAccessKeyId = paccess_key_id;
|
||||
AWSSecretAccessKey = pserect_access_key;
|
||||
AWSAccessToken = paccess_token;
|
||||
AWSAccessTokenExpire = token_expire;
|
||||
}
|
||||
|
||||
// clean
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(paccess_key_id){
|
||||
free(paccess_key_id);
|
||||
}
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(pserect_access_key){
|
||||
free(pserect_access_key);
|
||||
}
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(paccess_token){
|
||||
free(paccess_token);
|
||||
}
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(perrstr){
|
||||
free(perrstr);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
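The loading and update paths above free the perrstr (and credential) buffers by hand on every exit, each time under a pair of cppcheck suppressions. As an editorial sketch only, not part of this change, the same cleanup can be expressed once with a unique_ptr whose deleter calls free(); call_init() and init_fn are hypothetical stand-ins for invoking an fp_InitS3fsCredential pointer.

// Sketch of RAII cleanup for malloc'd strings returned by the credential callbacks.
#include <cstdio>
#include <cstdlib>
#include <memory>

struct FreeDeleter
{
    void operator()(char* p) const { free(p); }
};
using unique_cstr = std::unique_ptr<char, FreeDeleter>;

bool call_init(bool (*init_fn)(const char*, char**), const char* popts)
{
    char* perrstr = nullptr;
    bool  result  = init_fn(popts, &perrstr);
    unique_cstr errstr(perrstr);                 // freed on every path, even early returns
    if(!result && errstr){
        fprintf(stderr, "credlib init failed: %s\n", errstr.get());
    }
    return result;
}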
//-------------------------------------------------------------------
|
||||
// Methods: Option detection
|
||||
//-------------------------------------------------------------------
|
||||
@ -1117,6 +1409,7 @@ int S3fsCred::DetectParam(const char* arg)
|
||||
|
||||
if(is_prefix(arg, "passwd_file=")){
|
||||
SetS3fsPasswdFile(strchr(arg, '=') + sizeof(char));
|
||||
set_builtin_cred_opts = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1127,11 +1420,13 @@ int S3fsCred::DetectParam(const char* arg)
|
||||
SetIAMExpiryField("\"expiration\"");
|
||||
SetIAMFieldCount(2);
|
||||
SetIMDSVersion(1, AutoLock::NONE);
|
||||
set_builtin_cred_opts = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if(0 == strcmp(arg, "use_session_token")){
|
||||
SetIsUseSessionToken(true);
|
||||
set_builtin_cred_opts = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1146,11 +1441,13 @@ int S3fsCred::DetectParam(const char* arg)
|
||||
}
|
||||
endpoint_url = std::string(iam_endpoint) + "/identity/token";
|
||||
SetIAMCredentialsURL(endpoint_url.c_str());
|
||||
set_builtin_cred_opts = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if(0 == strcmp(arg, "imdsv1only")){
|
||||
SetIMDSVersion(1, AutoLock::NONE);
|
||||
set_builtin_cred_opts = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1163,6 +1460,7 @@ int S3fsCred::DetectParam(const char* arg)
|
||||
SetIMDSVersion(1, AutoLock::NONE);
|
||||
SetIAMCredentialsURL("http://169.254.170.2");
|
||||
SetIAMFieldCount(5);
|
||||
set_builtin_cred_opts = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1175,18 +1473,37 @@ int S3fsCred::DetectParam(const char* arg)
|
||||
// loading IAM role name in s3fs_init(), because we need to wait initializing curl.
|
||||
//
|
||||
SetIAMRoleMetadataType(true);
|
||||
set_builtin_cred_opts = true;
|
||||
return 0;
|
||||
|
||||
}else if(is_prefix(arg, "iam_role=")){
|
||||
const char* role = strchr(arg, '=') + sizeof(char);
|
||||
SetIAMRole(role, AutoLock::NONE);
|
||||
SetIAMRoleMetadataType(false);
|
||||
set_builtin_cred_opts = true;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
if(is_prefix(arg, "profile=")){
|
||||
SetAwsProfileName(strchr(arg, '=') + sizeof(char));
|
||||
set_builtin_cred_opts = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if(is_prefix(arg, "credlib=")){
|
||||
if(!SetExtCredLib(strchr(arg, '=') + sizeof(char))){
|
||||
S3FS_PRN_EXIT("failed to set credlib option : %s", (strchr(arg, '=') + sizeof(char)));
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
if(is_prefix(arg, "credlib_opts=")){
|
||||
if(!SetExtCredLibOpts(strchr(arg, '=') + sizeof(char))){
|
||||
S3FS_PRN_EXIT("failed to set credlib_opts option : %s", (strchr(arg, '=') + sizeof(char)));
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1204,6 +1521,7 @@ bool S3fsCred::CheckForbiddenBucketParams()
|
||||
// The first plain argument is the bucket
|
||||
if(bucket_name.empty()){
|
||||
S3FS_PRN_EXIT("missing BUCKET argument.");
|
||||
show_usage();
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -1216,7 +1534,7 @@ bool S3fsCred::CheckForbiddenBucketParams()
|
||||
// check bucket name for illegal characters
|
||||
size_t found = bucket_name.find_first_of("/:\\;!@#$%^&*?|+=");
|
||||
if(found != std::string::npos){
|
||||
S3FS_PRN_EXIT("BUCKET %s -- bucket name contains an illegal character.", bucket_name.c_str());
|
||||
S3FS_PRN_EXIT("BUCKET %s -- bucket name contains an illegal character: '%c' at position %zu", bucket_name.c_str(), bucket_name[found], found);
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -1250,7 +1568,7 @@ bool S3fsCred::CheckAllParams()
|
||||
return false;
|
||||
}
|
||||
|
||||
if(!S3fsCurl::IsPublicBucket() && !load_iamrole && !is_ecs){
|
||||
if(!S3fsCurl::IsPublicBucket() && !load_iamrole && !is_ecs && !IsSetExtCredLib()){
|
||||
if(!InitialS3fsCredentials()){
|
||||
return false;
|
||||
}
|
||||
@ -1271,6 +1589,32 @@ bool S3fsCred::CheckAllParams()
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// check External Credential Library
|
||||
//
|
||||
// [NOTE]
|
||||
// If credlib(_opts) option (for External Credential Library) is specified,
|
||||
// no other Credential related options can be specified. It is exclusive.
|
||||
//
|
||||
if(set_builtin_cred_opts && (IsSetExtCredLib() || IsSetExtCredLibOpts())){
|
||||
S3FS_PRN_EXIT("The \"credlib\" or \"credlib_opts\" option and other credential-related options(passwd_file, iam_role, profile, use_session_token, ecs, imdsv1only, ibm_iam_auth, ibm_iam_endpoint, etc) cannot be specified together.");
|
||||
return false;
|
||||
}
|
||||
|
||||
// Load and Initialize external credential library
|
||||
if(IsSetExtCredLib() || IsSetExtCredLibOpts()){
|
||||
if(!IsSetExtCredLib()){
|
||||
S3FS_PRN_EXIT("The \"credlib_opts\"(%s) is specifyed but \"credlib\" option is not specified.", credlib_opts.c_str());
|
||||
return false;
|
||||
}
|
||||
|
||||
if(!InitExtCredLib()){
|
||||
S3FS_PRN_EXIT("failed to load the library specified by the option credlib(%s, %s).", credlib.c_str(), credlib_opts.c_str());
|
||||
return false;
|
||||
}
|
||||
S3FS_PRN_INFO("Loaded External Credential Library:\n%s", GetCredFuncVersion(true));
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
@ -22,6 +22,7 @@
|
||||
#define S3FS_CRED_H_
|
||||
|
||||
#include "autolock.h"
|
||||
#include "s3fs_extcred.h"
|
||||
|
||||
//----------------------------------------------
|
||||
// Typedefs
|
||||
@ -39,20 +40,20 @@ typedef std::map<std::string, std::string> iamcredmap_t;
|
||||
class S3fsCred
|
||||
{
|
||||
private:
|
||||
static const char* ALLBUCKET_FIELDS_TYPE; // special key for mapping(This name is absolutely not used as a bucket name)
|
||||
static const char* KEYVAL_FIELDS_TYPE; // special key for mapping(This name is absolutely not used as a bucket name)
|
||||
static const char* AWS_ACCESSKEYID;
|
||||
static const char* AWS_SECRETKEY;
|
||||
static constexpr char ALLBUCKET_FIELDS_TYPE[] = ""; // special key for mapping(This name is absolutely not used as a bucket name)
|
||||
static constexpr char KEYVAL_FIELDS_TYPE[] = "\t"; // special key for mapping(This name is absolutely not used as a bucket name)
|
||||
static constexpr char AWS_ACCESSKEYID[] = "AWSAccessKeyId";
|
||||
static constexpr char AWS_SECRETKEY[] = "AWSSecretKey";
|
||||
|
||||
static const int IAM_EXPIRE_MERGIN;
|
||||
static const char* ECS_IAM_ENV_VAR;
|
||||
static const char* IAMCRED_ACCESSKEYID;
|
||||
static const char* IAMCRED_SECRETACCESSKEY;
|
||||
static const char* IAMCRED_ROLEARN;
|
||||
static constexpr int IAM_EXPIRE_MERGIN = 20 * 60; // update timing
|
||||
static constexpr char ECS_IAM_ENV_VAR[] = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI";
|
||||
static constexpr char IAMCRED_ACCESSKEYID[] = "AccessKeyId";
|
||||
static constexpr char IAMCRED_SECRETACCESSKEY[] = "SecretAccessKey";
|
||||
static constexpr char IAMCRED_ROLEARN[] = "RoleArn";
|
||||
|
||||
static std::string bucket_name;
|
||||
|
||||
pthread_mutex_t token_lock;
|
||||
mutable pthread_mutex_t token_lock;
|
||||
bool is_lock_init;
|
||||
|
||||
std::string passwd_file;
|
||||
@ -77,23 +78,33 @@ class S3fsCred
|
||||
std::string IAM_expiry_field;
|
||||
std::string IAM_role; // Protect exclusively
|
||||
|
||||
bool set_builtin_cred_opts; // true if options other than "credlib" is set
|
||||
std::string credlib; // credlib(name or path)
|
||||
std::string credlib_opts; // options for credlib
|
||||
|
||||
void* hExtCredLib;
|
||||
fp_VersionS3fsCredential pFuncCredVersion;
|
||||
fp_InitS3fsCredential pFuncCredInit;
|
||||
fp_FreeS3fsCredential pFuncCredFree;
|
||||
fp_UpdateS3fsCredential pFuncCredUpdate;
|
||||
|
||||
public:
|
||||
static const char* IAMv2_token_url;
|
||||
static int IAMv2_token_ttl;
|
||||
static const char* IAMv2_token_ttl_hdr;
|
||||
static const char* IAMv2_token_hdr;
|
||||
static constexpr char IAMv2_token_url[] = "http://169.254.169.254/latest/api/token";
|
||||
static constexpr int IAMv2_token_ttl = 21600;
|
||||
static constexpr char IAMv2_token_ttl_hdr[] = "X-aws-ec2-metadata-token-ttl-seconds";
|
||||
static constexpr char IAMv2_token_hdr[] = "X-aws-ec2-metadata-token";
|
||||
|
||||
private:
|
||||
static bool ParseIAMRoleFromMetaDataResponse(const char* response, std::string& rolename);
|
||||
|
||||
bool SetS3fsPasswdFile(const char* file);
|
||||
bool IsSetPasswdFile();
|
||||
bool IsSetPasswdFile() const;
|
||||
bool SetAwsProfileName(const char* profile_name);
|
||||
bool SetIAMRoleMetadataType(bool flag);
|
||||
|
||||
bool SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey, AutoLock::Type type);
|
||||
bool SetAccessKeyWithSessionToken(const char* AccessKeyId, const char* SecretAccessKey, const char * SessionToken, AutoLock::Type type);
|
||||
bool IsSetAccessKeys(AutoLock::Type type);
|
||||
bool IsSetAccessKeys(AutoLock::Type type) const;
|
||||
|
||||
bool SetIsECS(bool flag);
|
||||
bool SetIsUseSessionToken(bool flag);
|
||||
@ -101,25 +112,25 @@ class S3fsCred
|
||||
bool SetIsIBMIAMAuth(bool flag);
|
||||
|
||||
int SetIMDSVersion(int version, AutoLock::Type type);
|
||||
int GetIMDSVersion(AutoLock::Type type);
|
||||
int GetIMDSVersion(AutoLock::Type type) const;
|
||||
|
||||
bool SetIAMv2APIToken(const std::string& token, AutoLock::Type type);
|
||||
std::string GetIAMv2APIToken(AutoLock::Type type);
|
||||
std::string GetIAMv2APIToken(AutoLock::Type type) const;
|
||||
|
||||
bool SetIAMRole(const char* role, AutoLock::Type type);
|
||||
std::string GetIAMRole(AutoLock::Type type);
|
||||
bool IsSetIAMRole(AutoLock::Type type);
|
||||
std::string GetIAMRole(AutoLock::Type type) const;
|
||||
bool IsSetIAMRole(AutoLock::Type type) const;
|
||||
size_t SetIAMFieldCount(size_t field_count);
|
||||
std::string SetIAMCredentialsURL(const char* url);
|
||||
std::string SetIAMTokenField(const char* token_field);
|
||||
std::string SetIAMExpiryField(const char* expiry_field);
|
||||
|
||||
bool IsReadableS3fsPasswdFile();
|
||||
bool IsReadableS3fsPasswdFile() const;
|
||||
bool CheckS3fsPasswdFilePerms();
|
||||
bool ParseS3fsPasswdFile(bucketkvmap_t& resmap);
|
||||
bool ReadS3fsPasswdFile(AutoLock::Type type);
|
||||
|
||||
int CheckS3fsCredentialAwsFormat(const kvmap_t& kvmap, std::string& access_key_id, std::string& secret_access_key);
|
||||
static int CheckS3fsCredentialAwsFormat(const kvmap_t& kvmap, std::string& access_key_id, std::string& secret_access_key);
|
||||
bool ReadAwsCredentialFile(const std::string &filename, AutoLock::Type type);
|
||||
|
||||
bool InitialS3fsCredentials();
|
||||
@ -130,7 +141,17 @@ class S3fsCred
|
||||
bool SetIAMCredentials(const char* response, AutoLock::Type type);
|
||||
bool SetIAMRoleFromMetaData(const char* response, AutoLock::Type type);
|
||||
|
||||
bool CheckForbiddenBucketParams();
|
||||
bool SetExtCredLib(const char* arg);
|
||||
bool IsSetExtCredLib() const;
|
||||
bool SetExtCredLibOpts(const char* args);
|
||||
bool IsSetExtCredLibOpts() const;
|
||||
|
||||
bool InitExtCredLib();
|
||||
bool LoadExtCredLib();
|
||||
bool UnloadExtCredLib();
|
||||
bool UpdateExtCredentials(AutoLock::Type type);
|
||||
|
||||
static bool CheckForbiddenBucketParams();
|
||||
|
||||
public:
|
||||
static bool SetBucket(const char* bucket);
|
||||
@ -138,12 +159,17 @@ class S3fsCred
|
||||
|
||||
S3fsCred();
|
||||
~S3fsCred();
|
||||
S3fsCred(const S3fsCred&) = delete;
|
||||
S3fsCred(S3fsCred&&) = delete;
|
||||
S3fsCred& operator=(const S3fsCred&) = delete;
|
||||
S3fsCred& operator=(S3fsCred&&) = delete;
|
||||
|
||||
bool IsIBMIAMAuth() const { return is_ibm_iam_auth; }
|
||||
|
||||
bool LoadIAMRoleFromMetaData();
|
||||
|
||||
bool CheckIAMCredentialUpdate(std::string* access_key_id = NULL, std::string* secret_access_key = NULL, std::string* access_token = NULL);
|
||||
bool CheckIAMCredentialUpdate(std::string* access_key_id = nullptr, std::string* secret_access_key = nullptr, std::string* access_token = nullptr);
|
||||
const char* GetCredFuncVersion(bool detail) const;
|
||||
|
||||
int DetectParam(const char* arg);
|
||||
bool CheckAllParams();
|
||||
|
||||
src/s3fs_extcred.h (new file, 144 lines)
@ -0,0 +1,144 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_EXTCRED_H_
|
||||
#define S3FS_EXTCRED_H_
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Attributes(weak) : use only in s3fs-fuse internally
|
||||
//-------------------------------------------------------------------
|
||||
// [NOTE]
|
||||
// This macro is only used inside s3fs-fuse.
|
||||
// External projects that utilize this header file substitute empty
|
||||
// values as follows:
|
||||
//
|
||||
#ifndef S3FS_FUNCATTR_WEAK
|
||||
#define S3FS_FUNCATTR_WEAK
|
||||
#endif
|
||||
|
||||
extern "C" {
|
||||
//-------------------------------------------------------------------
|
||||
// Prototype for External Credential 4 functions
|
||||
//-------------------------------------------------------------------
|
||||
//
|
||||
// [Required] VersionS3fsCredential
|
||||
//
|
||||
// Returns the library name and version as a string.
|
||||
//
|
||||
extern const char* VersionS3fsCredential(bool detail) S3FS_FUNCATTR_WEAK;
|
||||
|
||||
//
|
||||
// [Optional] InitS3fsCredential
|
||||
//
|
||||
// A function that does the necessary initialization after the library is
|
||||
// loaded. This function is called only once immediately after loading the
|
||||
// library.
|
||||
// If there is a required initialization inside the library, implement it.
|
||||
// Implementation of this function is optional and not required. If not
|
||||
// implemented, it will not be called.
|
||||
//
|
||||
// const char* popts : String passed with the credlib_opts option. If the
|
||||
// credlib_opts option is not specified, nullptr will be
|
||||
// passed.
|
||||
// char** pperrstr : pperrstr is used to pass the error message to the
|
||||
// caller when an error occurs.
|
||||
// If this pointer is not nullptr, you can allocate memory
|
||||
// and set an error message to it. The allocated memory
|
||||
// area is freed by the caller.
|
||||
//
|
||||
extern bool InitS3fsCredential(const char* popts, char** pperrstr) S3FS_FUNCATTR_WEAK;
|
||||
|
||||
//
|
||||
// [Optional] FreeS3fsCredential
|
||||
//
|
||||
// A function that is called only once just before the library is unloaded.
|
||||
// If there is a required discard process in the library, implement it.
|
||||
// Implementation of this feature is optional and not required.
|
||||
// If not implemented, it will not be called.
|
||||
//
|
||||
// char** pperrstr : pperrstr is used to pass the error message to the
|
||||
// caller when an error occurs.
|
||||
// If this pointer is not nullptr, you can allocate memory
|
||||
// and set an error message to it. The allocated memory
|
||||
// area is freed by the caller.
|
||||
//
|
||||
extern bool FreeS3fsCredential(char** pperrstr) S3FS_FUNCATTR_WEAK;
|
||||
|
||||
//
|
||||
// [Required] UpdateS3fsCredential
|
||||
//
|
||||
// A function that updates the token.
|
||||
//
|
||||
// char** ppaccess_key_id : Allocate and set "Access Key ID" string
|
||||
// area to *ppaccess_key_id.
|
||||
// char** ppserect_access_key : Allocate and set "Access Secret Key ID"
|
||||
// string area to *ppserect_access_key.
|
||||
// char** ppaccess_token : Allocate and set "Token" string area to
|
||||
// *ppaccess_token.
|
||||
// long long* ptoken_expire : Set token expire time(time_t) value to
|
||||
// *ptoken_expire.
|
||||
// This is essentially a time_t* variable.
|
||||
// To avoid system differences about time_t
|
||||
// size, long long* is used.
|
||||
// When setting the value, cast from time_t
|
||||
// to long long to set the value.
|
||||
// char** pperrstr : pperrstr is used to pass the error message to the
|
||||
// caller when an error occurs.
|
||||
//
|
||||
// For all argument of the character string pointer(char **) set the
|
||||
// allocated string area. The allocated area is freed by the caller.
|
||||
//
|
||||
extern bool UpdateS3fsCredential(char** ppaccess_key_id, char** ppserect_access_key, char** ppaccess_token, long long* ptoken_expire, char** pperrstr) S3FS_FUNCATTR_WEAK;
|
||||
|
||||
//---------------------------------------------------------
|
||||
// Typedef Prototype function
|
||||
//---------------------------------------------------------
|
||||
//
|
||||
// const char* VersionS3fsCredential()
|
||||
//
|
||||
typedef const char* (*fp_VersionS3fsCredential)(bool detail);
|
||||
|
||||
//
|
||||
// bool InitS3fsCredential(char** pperrstr)
|
||||
//
|
||||
typedef bool (*fp_InitS3fsCredential)(const char* popts, char** pperrstr);
|
||||
|
||||
//
|
||||
// bool FreeS3fsCredential(char** pperrstr)
|
||||
//
|
||||
typedef bool (*fp_FreeS3fsCredential)(char** pperrstr);
|
||||
|
||||
//
|
||||
// bool UpdateS3fsCredential(char** ppaccess_key_id, char** ppserect_access_key, char** ppaccess_token, long long* ptoken_expire, char** pperrstr)
|
||||
//
|
||||
typedef bool (*fp_UpdateS3fsCredential)(char** ppaccess_key_id, char** ppserect_access_key, char** ppaccess_token, long long* ptoken_expire, char** pperrstr);
|
||||
|
||||
} // extern "C"
|
||||
|
||||
#endif // S3FS_EXTCRED_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
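To make the contract above concrete, here is a hedged sketch of a minimal external credential library. The file name mycred.cpp, the build line, and every credential value are placeholders; only the four function names and signatures come from this header. Such a library would then be selected at mount time with the credlib option (and, if needed, credlib_opts), which is exactly what the exclusivity check in S3fsCred::CheckAllParams() expects.

// mycred.cpp (hypothetical). Build roughly as:
//   g++ -std=c++11 -fPIC -shared mycred.cpp -o libmycred.so
// and mount with: -o credlib=libmycred.so
#include <cstring>
#include <ctime>
#include "s3fs_extcred.h"   // the header introduced above; S3FS_FUNCATTR_WEAK expands to nothing here

extern "C" const char* VersionS3fsCredential(bool detail)
{
    return detail ? "example credlib - v0.1\n(sketch only)\n" : "example credlib - v0.1";
}

extern "C" bool InitS3fsCredential(const char* popts, char** pperrstr)
{
    (void)popts;                          // contents of credlib_opts, if any
    if(pperrstr){ *pperrstr = nullptr; }
    return true;
}

extern "C" bool FreeS3fsCredential(char** pperrstr)
{
    if(pperrstr){ *pperrstr = nullptr; }
    return true;
}

extern "C" bool UpdateS3fsCredential(char** ppaccess_key_id, char** ppserect_access_key, char** ppaccess_token, long long* ptoken_expire, char** pperrstr)
{
    // Every string must be allocated here; s3fs copies and then frees them.
    // A real library would fetch these from its own credential provider.
    *ppaccess_key_id     = strdup("AKIAEXAMPLE");
    *ppserect_access_key = strdup("example-secret");
    *ppaccess_token      = strdup("example-token");
    *ptoken_expire       = static_cast<long long>(time(nullptr)) + 3600;
    if(pperrstr){ *pperrstr = nullptr; }
    return (*ppaccess_key_id && *ppserect_access_key && *ppaccess_token);
}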
@ -18,7 +18,6 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <string>
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
|
||||
@ -19,19 +19,17 @@
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_help.h"
|
||||
#include "s3fs_auth.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Contents
|
||||
//-------------------------------------------------------------------
|
||||
static const char help_string[] =
|
||||
static constexpr char help_string[] =
|
||||
"\n"
|
||||
"Mount an Amazon S3 bucket as a file system.\n"
|
||||
"\n"
|
||||
@ -87,7 +85,7 @@ static const char help_string[] =
|
||||
" storage_class (default=\"standard\")\n"
|
||||
" - store object with specified storage class. Possible values:\n"
|
||||
" standard, standard_ia, onezone_ia, reduced_redundancy,\n"
|
||||
" intelligent_tiering, glacier, and deep_archive.\n"
|
||||
" intelligent_tiering, glacier, glacier_ir, and deep_archive.\n"
|
||||
"\n"
|
||||
" use_rrs (default is disable)\n"
|
||||
" - use Amazon's Reduced Redundancy Storage.\n"
|
||||
@ -130,6 +128,8 @@ static const char help_string[] =
|
||||
" environment which value is <kms id>. You must be careful\n"
|
||||
" about that you can not use the KMS id which is not same EC2\n"
|
||||
" region.\n"
|
||||
" Additionally, if you specify SSE-KMS, your endpoints must use\n"
|
||||
" Secure Sockets Layer(SSL) or Transport Layer Security(TLS).\n"
|
||||
"\n"
|
||||
" load_sse_c - specify SSE-C keys\n"
|
||||
" Specify the custom-provided encryption keys file path for decrypting\n"
|
||||
@ -204,15 +204,11 @@ static const char help_string[] =
|
||||
" of the stat cache. This option is exclusive with stat_cache_expire,\n"
|
||||
" and is left for compatibility with older versions.\n"
|
||||
"\n"
|
||||
" enable_noobj_cache (default is disable)\n"
|
||||
" - enable cache entries for the object which does not exist.\n"
|
||||
" s3fs always has to check whether file (or sub directory) exists \n"
|
||||
" under object (path) when s3fs does some command, since s3fs has \n"
|
||||
" recognized a directory which does not exist and has files or \n"
|
||||
" sub directories under itself. It increases ListBucket request \n"
|
||||
" and makes performance bad.\n"
|
||||
" You can specify this option for performance, s3fs memorizes \n"
|
||||
" in stat cache that the object (file or directory) does not exist.\n"
|
||||
" disable_noobj_cache (default is enable)\n"
|
||||
" - By default s3fs memorizes when an object does not exist up until\n"
|
||||
" the stat cache timeout. This caching can cause staleness for\n"
|
||||
" applications. If disabled, s3fs will not memorize objects and may\n"
|
||||
" cause extra HeadObject requests and reduce performance.\n"
|
||||
"\n"
|
||||
" no_check_certificate\n"
|
||||
" - server certificate won't be checked against the available \n"
|
||||
@ -255,6 +251,19 @@ static const char help_string[] =
|
||||
" The minimum value is 50 MB. -1 value means disable.\n"
|
||||
" Cannot be used with nomixupload.\n"
|
||||
"\n"
|
||||
" bucket_size (default=maximum long unsigned integer value)\n"
|
||||
" - The size of the bucket with which the corresponding\n"
|
||||
" elements of the statvfs structure will be filled. The option\n"
|
||||
" argument is an integer optionally followed by a\n"
|
||||
" multiplicative suffix (GB, GiB, TB, TiB, PB, PiB,\n"
|
||||
" EB, EiB) (no spaces in between). If no suffix is supplied,\n"
|
||||
" bytes are assumed; eg: 20000000, 30GB, 45TiB. Note that\n"
|
||||
" s3fs does not compute the actual volume size (too\n"
|
||||
" expensive): by default it will assume the maximum possible\n"
|
||||
" size; however, since this may confuse other software which\n"
|
||||
" uses s3fs, the advertised bucket size can be set with this\n"
|
||||
" option.\n"
|
||||
"\n"
|
||||
" ensure_diskfree (default 0)\n"
|
||||
" - sets MB to ensure disk free space. This option means the\n"
|
||||
" threshold of free space size on disk which is used for the\n"
|
||||
@ -263,6 +272,15 @@ static const char help_string[] =
|
||||
" space is smaller than this value, s3fs do not use disk space\n"
|
||||
" as possible in exchange for the performance.\n"
|
||||
"\n"
|
||||
" free_space_ratio (default=\"10\")\n"
|
||||
" - sets min free space ratio of the disk.\n"
|
||||
" The value of this option can be between 0 and 100. It will control\n"
|
||||
" the size of the cache according to this ratio to ensure that the\n"
|
||||
" idle ratio of the disk is greater than this value.\n"
|
||||
" For example, when the disk space is 50GB, the default value will\n"
|
||||
" ensure that the disk will reserve at least 50GB * 10%% = 5GB of\n"
|
||||
" remaining space.\n"
|
||||
"\n"
|
||||
" multipart_threshold (default=\"25\")\n"
|
||||
" - threshold, in MB, to use multipart upload instead of\n"
|
||||
" single-part. Must be at least 5 MB.\n"
|
||||
@ -314,6 +332,22 @@ static const char help_string[] =
|
||||
"\n"
|
||||
" nomultipart (disable multipart uploads)\n"
|
||||
"\n"
|
||||
" streamupload (default is disable)\n"
|
||||
" - Enable stream upload.\n"
|
||||
" If this option is enabled, a sequential upload will be performed\n"
|
||||
" in parallel with the write from the part that has been written\n"
|
||||
" during a multipart upload.\n"
|
||||
" This is expected to give better performance than other upload\n"
|
||||
" functions.\n"
|
||||
" Note that this option is still experimental and may change in the\n"
|
||||
" future.\n"
|
||||
"\n"
|
||||
" max_thread_count (default is \"5\")\n"
|
||||
" - Specifies the number of threads waiting for stream uploads.\n"
|
||||
" Note that this option and Streamm Upload are still experimental\n"
|
||||
" and subject to change in the future.\n"
|
||||
" This option will be merged with \"parallel_count\" in the future.\n"
|
||||
"\n"
|
||||
" enable_content_md5 (default is disable)\n"
|
||||
" - Allow S3 server to check data integrity of uploads via the\n"
|
||||
" Content-MD5 header. This can add CPU overhead to transfers.\n"
|
||||
@ -347,6 +381,22 @@ static const char help_string[] =
|
||||
" ibm_iam_endpoint (default is https://iam.cloud.ibm.com)\n"
|
||||
" - sets the URL to use for IBM IAM authentication.\n"
|
||||
"\n"
|
||||
" credlib (default=\"\" which means disabled)\n"
|
||||
" - Specifies the shared library that handles the credentials\n"
|
||||
" containing the authentication token.\n"
|
||||
" If this option is specified, the specified credential and token\n"
|
||||
" processing provided by the shared library ant will be performed\n"
|
||||
" instead of the built-in credential processing.\n"
|
||||
" This option cannot be specified with passwd_file, profile,\n"
|
||||
" use_session_token, ecs, ibm_iam_auth, ibm_iam_endpoint, imdsv1only\n"
|
||||
" and iam_role option.\n"
|
||||
"\n"
|
||||
" credlib_opts (default=\"\" which means disabled)\n"
|
||||
" - Specifies the options to pass when the shared library specified\n"
|
||||
" in credlib is loaded and then initialized.\n"
|
||||
" For the string specified in this option, specify the string defined\n"
|
||||
" by the shared library.\n"
|
||||
"\n"
|
||||
" use_xattr (default is not handling the extended attribute)\n"
|
||||
" Enable to handle the extended attribute (xattrs).\n"
|
||||
" If you set this option, you can use the extended attribute.\n"
|
||||
@ -416,23 +466,20 @@ static const char help_string[] =
|
||||
" for a object, then the object will not be able to be allowed to\n"
|
||||
" list/modify.\n"
|
||||
"\n"
|
||||
" notsup_compat_dir (disable support of alternative directory names)\n"
|
||||
" s3fs supports the three different naming schemas \"dir/\",\n"
|
||||
" \"dir\" and \"dir_$folder$\" to map directory names to S3\n"
|
||||
" objects and vice versa. As a fourth variant, directories can be\n"
|
||||
" compat_dir (enable support of alternative directory names)\n"
|
||||
" s3fs supports two different naming schemas \"dir/\" and\n"
|
||||
" \"dir\" to map directory names to S3 objects and\n"
|
||||
" vice versa by default. As a third variant, directories can be\n"
|
||||
" determined indirectly if there is a file object with a path (e.g.\n"
|
||||
" \"/dir/file\") but without the parent directory.\n"
|
||||
" This option enables a fourth variant, \"dir_$folder$\", created by\n"
|
||||
" older applications.\n"
|
||||
" \n"
|
||||
" S3fs uses only the first schema \"dir/\" to create S3 objects for\n"
|
||||
" directories."
|
||||
" \n"
|
||||
" The support for these different naming schemas causes an increased\n"
|
||||
" communication effort.\n"
|
||||
" \n"
|
||||
" If all applications exclusively use the \"dir/\" naming scheme and\n"
|
||||
" the bucket does not contain any objects with a different naming \n"
|
||||
" scheme, this option can be used to disable support for alternative\n"
|
||||
" naming schemes. This reduces access time and can save costs.\nq"
|
||||
"\n"
|
||||
" use_wtf8 - support arbitrary file system encoding.\n"
|
||||
" S3 requires all object names to be valid UTF-8. But some\n"
|
||||
@ -460,6 +507,26 @@ static const char help_string[] =
|
||||
" If this file does not exist on macOS, then \"/etc/apache2/mime.types\"\n"
|
||||
" is checked as well.\n"
|
||||
"\n"
|
||||
" proxy (default=\"\")\n"
|
||||
" This option specifies a proxy to S3 server.\n"
|
||||
" Specify the proxy with '[<scheme://]hostname(fqdn)[:<port>]' formatted.\n"
|
||||
" '<schema>://' can be omitted, and 'http://' is used when omitted.\n"
|
||||
" Also, ':<port>' can also be omitted. If omitted, port 443 is used for\n"
|
||||
" HTTPS schema, and port 1080 is used otherwise.\n"
|
||||
" This option is the same as the curl command's '--proxy(-x)' option and\n"
|
||||
" libcurl's 'CURLOPT_PROXY' flag.\n"
|
||||
" This option is equivalent to and takes precedence over the environment\n"
|
||||
" variables 'http_proxy', 'all_proxy', etc.\n"
|
||||
"\n"
|
||||
" proxy_cred_file (default=\"\")\n"
|
||||
" This option specifies the file that describes the username and\n"
|
||||
" passphrase for authentication of the proxy when the HTTP schema\n"
|
||||
" proxy is specified by the 'proxy' option.\n"
|
||||
" Username and passphrase are valid only for HTTP schema. If the HTTP\n"
|
||||
" proxy does not require authentication, this option is not required.\n"
|
||||
" Separate the username and passphrase with a ':' character and\n"
|
||||
" specify each as a URL-encoded string.\n"
|
||||
"\n"
|
||||
" logfile - specify the log output file.\n"
|
||||
" s3fs outputs the log file to syslog. Alternatively, if s3fs is\n"
|
||||
" started with the \"-f\" option specified, the log will be output\n"
|
||||
@ -499,6 +566,12 @@ static const char help_string[] =
|
||||
" check result to that file. The file path parameter can be omitted.\n"
|
||||
" If omitted, the result will be output to stdout or syslog.\n"
|
||||
"\n"
|
||||
" update_parent_dir_stat (default is disable)\n"
|
||||
" The parent directory's mtime and ctime are updated when a file or\n"
|
||||
" directory is created or deleted (when the parent directory's inode is\n"
|
||||
" updated).\n"
|
||||
" By default, parent directory statistics are not updated.\n"
|
||||
"\n"
|
||||
"FUSE/mount Options:\n"
|
||||
"\n"
|
||||
" Most of the generic mount options described in 'man mount' are\n"
|
||||
@ -567,7 +640,7 @@ void show_version()
|
||||
|
||||
const char* short_version()
|
||||
{
|
||||
static const char short_ver[] = "s3fs version " VERSION "(" COMMIT_HASH_VAL ")";
|
||||
static constexpr char short_ver[] = "s3fs version " VERSION "(" COMMIT_HASH_VAL ")";
|
||||
return short_ver;
|
||||
}
|
||||
|
||||
|
||||
@ -20,6 +20,7 @@
|
||||
|
||||
#include <cstdlib>
|
||||
#include <iomanip>
|
||||
#include <memory>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
|
||||
@ -29,14 +30,13 @@
|
||||
//-------------------------------------------------------------------
|
||||
// S3fsLog class : variables
|
||||
//-------------------------------------------------------------------
|
||||
const int S3fsLog::NEST_MAX;
|
||||
const char* const S3fsLog::nest_spaces[S3fsLog::NEST_MAX] = {"", " ", " ", " "};
|
||||
const char S3fsLog::LOGFILEENV[] = "S3FS_LOGFILE";
|
||||
const char S3fsLog::MSGTIMESTAMP[] = "S3FS_MSGTIMESTAMP";
|
||||
S3fsLog* S3fsLog::pSingleton = NULL;
|
||||
constexpr char S3fsLog::LOGFILEENV[];
|
||||
constexpr const char* S3fsLog::nest_spaces[];
|
||||
constexpr char S3fsLog::MSGTIMESTAMP[];
|
||||
S3fsLog* S3fsLog::pSingleton = nullptr;
|
||||
S3fsLog::s3fs_log_level S3fsLog::debug_level = S3fsLog::LEVEL_CRIT;
|
||||
FILE* S3fsLog::logfp = NULL;
|
||||
std::string* S3fsLog::plogfile = NULL;
|
||||
FILE* S3fsLog::logfp = nullptr;
|
||||
std::string S3fsLog::logfile;
|
||||
bool S3fsLog::time_stamp = true;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -59,7 +59,7 @@ std::string S3fsLog::GetCurrentTime()
|
||||
now.tv_sec = tsnow.tv_sec;
|
||||
now.tv_usec = (tsnow.tv_nsec / 1000);
|
||||
}else{
|
||||
gettimeofday(&now, NULL);
|
||||
gettimeofday(&now, nullptr);
|
||||
}
|
||||
strftime(tmp, sizeof(tmp), "%Y-%m-%dT%H:%M:%S", gmtime_r(&now.tv_sec, &res));
|
||||
current_time << tmp << "." << std::setfill('0') << std::setw(3) << (now.tv_usec / 1000) << "Z ";
|
||||
@ -70,7 +70,7 @@ std::string S3fsLog::GetCurrentTime()
|
||||
bool S3fsLog::SetLogfile(const char* pfile)
|
||||
{
|
||||
if(!S3fsLog::pSingleton){
|
||||
S3FS_PRN_CRIT("S3fsLog::pSingleton is NULL.");
|
||||
S3FS_PRN_CRIT("S3fsLog::pSingleton is nullptr.");
|
||||
return false;
|
||||
}
|
||||
return S3fsLog::pSingleton->LowSetLogfile(pfile);
|
||||
@ -79,25 +79,25 @@ bool S3fsLog::SetLogfile(const char* pfile)
|
||||
bool S3fsLog::ReopenLogfile()
|
||||
{
|
||||
if(!S3fsLog::pSingleton){
|
||||
S3FS_PRN_CRIT("S3fsLog::pSingleton is NULL.");
|
||||
S3FS_PRN_CRIT("S3fsLog::pSingleton is nullptr.");
|
||||
return false;
|
||||
}
|
||||
if(!S3fsLog::logfp){
|
||||
S3FS_PRN_INFO("Currently the log file is output to stdout/stderr.");
|
||||
return true;
|
||||
}
|
||||
if(!S3fsLog::plogfile){
|
||||
S3FS_PRN_ERR("There is a problem with the path to the log file being NULL.");
|
||||
if(S3fsLog::logfile.empty()){
|
||||
S3FS_PRN_ERR("There is a problem with the path to the log file being empty.");
|
||||
return false;
|
||||
}
|
||||
std::string tmp = *(S3fsLog::plogfile);
|
||||
std::string tmp = S3fsLog::logfile;
|
||||
return S3fsLog::pSingleton->LowSetLogfile(tmp.c_str());
|
||||
}
|
||||
|
||||
S3fsLog::s3fs_log_level S3fsLog::SetLogLevel(s3fs_log_level level)
|
||||
{
|
||||
if(!S3fsLog::pSingleton){
|
||||
S3FS_PRN_CRIT("S3fsLog::pSingleton is NULL.");
|
||||
S3FS_PRN_CRIT("S3fsLog::pSingleton is nullptr.");
|
||||
return S3fsLog::debug_level; // Although it is an error, it returns the current value.
|
||||
}
|
||||
return S3fsLog::pSingleton->LowSetLogLevel(level);
|
||||
@ -106,7 +106,7 @@ S3fsLog::s3fs_log_level S3fsLog::SetLogLevel(s3fs_log_level level)
|
||||
S3fsLog::s3fs_log_level S3fsLog::BumpupLogLevel()
|
||||
{
|
||||
if(!S3fsLog::pSingleton){
|
||||
S3FS_PRN_CRIT("S3fsLog::pSingleton is NULL.");
|
||||
S3FS_PRN_CRIT("S3fsLog::pSingleton is nullptr.");
|
||||
return S3fsLog::debug_level; // Although it is an error, it returns the current value.
|
||||
}
|
||||
return S3fsLog::pSingleton->LowBumpupLogLevel();
|
||||
@ -139,15 +139,12 @@ S3fsLog::~S3fsLog()
|
||||
{
|
||||
if(S3fsLog::pSingleton == this){
|
||||
FILE* oldfp = S3fsLog::logfp;
|
||||
S3fsLog::logfp = NULL;
|
||||
S3fsLog::logfp = nullptr;
|
||||
if(oldfp && 0 != fclose(oldfp)){
|
||||
S3FS_PRN_ERR("Could not close old log file(%s), but continue...", (S3fsLog::plogfile ? S3fsLog::plogfile->c_str() : "null"));
|
||||
S3FS_PRN_ERR("Could not close old log file(%s), but continue...", (S3fsLog::logfile.empty() ? S3fsLog::logfile.c_str() : "null"));
|
||||
}
|
||||
if(S3fsLog::plogfile){
|
||||
delete S3fsLog::plogfile;
|
||||
S3fsLog::plogfile = NULL;
|
||||
}
|
||||
S3fsLog::pSingleton = NULL;
|
||||
S3fsLog::logfile.clear();
|
||||
S3fsLog::pSingleton = nullptr;
|
||||
S3fsLog::debug_level = S3fsLog::LEVEL_CRIT;
|
||||
|
||||
closelog();
|
||||
@ -163,12 +160,12 @@ bool S3fsLog::LowLoadEnv()
|
||||
return false;
|
||||
}
|
||||
char* pEnvVal;
|
||||
if(NULL != (pEnvVal = getenv(S3fsLog::LOGFILEENV))){
|
||||
if(nullptr != (pEnvVal = getenv(S3fsLog::LOGFILEENV))){
|
||||
if(!SetLogfile(pEnvVal)){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if(NULL != (pEnvVal = getenv(S3fsLog::MSGTIMESTAMP))){
|
||||
if(nullptr != (pEnvVal = getenv(S3fsLog::MSGTIMESTAMP))){
|
||||
if(0 == strcasecmp(pEnvVal, "true") || 0 == strcasecmp(pEnvVal, "yes") || 0 == strcasecmp(pEnvVal, "1")){
|
||||
S3fsLog::time_stamp = true;
|
||||
}else if(0 == strcasecmp(pEnvVal, "false") || 0 == strcasecmp(pEnvVal, "no") || 0 == strcasecmp(pEnvVal, "0")){
|
||||
@ -190,14 +187,11 @@ bool S3fsLog::LowSetLogfile(const char* pfile)
|
||||
if(!pfile){
|
||||
// close log file if it is opened
|
||||
if(S3fsLog::logfp && 0 != fclose(S3fsLog::logfp)){
|
||||
S3FS_PRN_ERR("Could not close log file(%s).", (S3fsLog::plogfile ? S3fsLog::plogfile->c_str() : "null"));
|
||||
S3FS_PRN_ERR("Could not close log file(%s).", (S3fsLog::logfile.empty() ? S3fsLog::logfile.c_str() : "null"));
|
||||
return false;
|
||||
}
|
||||
S3fsLog::logfp = NULL;
|
||||
if(S3fsLog::plogfile){
|
||||
delete S3fsLog::plogfile;
|
||||
S3fsLog::plogfile = NULL;
|
||||
}
|
||||
S3fsLog::logfp = nullptr;
|
||||
S3fsLog::logfile.clear();
|
||||
}else{
|
||||
// open new log file
|
||||
//
|
||||
@ -205,22 +199,20 @@ bool S3fsLog::LowSetLogfile(const char* pfile)
|
||||
// It will reopen even if it is the same file.
|
||||
//
|
||||
FILE* newfp;
|
||||
if(NULL == (newfp = fopen(pfile, "a+"))){
|
||||
if(nullptr == (newfp = fopen(pfile, "a+"))){
|
||||
S3FS_PRN_ERR("Could not open log file(%s).", pfile);
|
||||
return false;
|
||||
}
|
||||
|
||||
// switch new log file and close old log file if it is opened
|
||||
FILE* oldfp = S3fsLog::logfp;
|
||||
S3fsLog::logfp = newfp;
|
||||
if(oldfp && 0 != fclose(oldfp)){
|
||||
S3FS_PRN_ERR("Could not close old log file(%s).", (S3fsLog::plogfile ? S3fsLog::plogfile->c_str() : "null"));
|
||||
S3fsLog::logfp = oldfp;
|
||||
S3FS_PRN_ERR("Could not close old log file(%s).", (!S3fsLog::logfile.empty() ? S3fsLog::logfile.c_str() : "null"));
|
||||
fclose(newfp);
|
||||
return false;
|
||||
}
|
||||
delete S3fsLog::plogfile;
|
||||
S3fsLog::plogfile = new std::string(pfile);
|
||||
S3fsLog::logfp = newfp;
|
||||
S3fsLog::logfile = pfile;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
@ -262,24 +254,22 @@ void s3fs_low_logprn(S3fsLog::s3fs_log_level level, const char* file, const char
|
||||
if(S3fsLog::IsS3fsLogLevel(level)){
|
||||
va_list va;
|
||||
va_start(va, fmt);
|
||||
size_t len = vsnprintf(NULL, 0, fmt, va) + 1;
|
||||
size_t len = vsnprintf(nullptr, 0, fmt, va) + 1;
|
||||
va_end(va);
|
||||
|
||||
char *message = new char[len];
|
||||
std::unique_ptr<char[]> message(new char[len]);
|
||||
va_start(va, fmt);
|
||||
vsnprintf(message, len, fmt, va);
|
||||
vsnprintf(message.get(), len, fmt, va);
|
||||
va_end(va);
|
||||
|
||||
if(foreground || S3fsLog::IsSetLogFile()){
|
||||
S3fsLog::SeekEnd();
|
||||
fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s:%s(%d): %s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(level), file, func, line, message);
|
||||
fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s:%s(%d): %s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(level), file, func, line, message.get());
|
||||
S3fsLog::Flush();
|
||||
}else{
|
||||
// TODO: why does this differ from s3fs_low_logprn2?
|
||||
syslog(S3fsLog::GetSyslogLevel(level), "%s%s:%s(%d): %s", instance_name.c_str(), file, func, line, message);
|
||||
syslog(S3fsLog::GetSyslogLevel(level), "%s%s:%s(%d): %s", instance_name.c_str(), file, func, line, message.get());
|
||||
}
|
||||
|
||||
delete[] message;
|
||||
}
|
||||
}
|
||||
|
||||
@ -288,23 +278,21 @@ void s3fs_low_logprn2(S3fsLog::s3fs_log_level level, int nest, const char* file,
|
||||
if(S3fsLog::IsS3fsLogLevel(level)){
|
||||
va_list va;
|
||||
va_start(va, fmt);
|
||||
size_t len = vsnprintf(NULL, 0, fmt, va) + 1;
|
||||
size_t len = vsnprintf(nullptr, 0, fmt, va) + 1;
|
||||
va_end(va);
|
||||
|
||||
char *message = new char[len];
|
||||
std::unique_ptr<char[]> message(new char[len]);
|
||||
va_start(va, fmt);
|
||||
vsnprintf(message, len, fmt, va);
|
||||
vsnprintf(message.get(), len, fmt, va);
|
||||
va_end(va);
|
||||
|
||||
if(foreground || S3fsLog::IsSetLogFile()){
|
||||
S3fsLog::SeekEnd();
|
||||
fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s%s:%s(%d): %s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(level), S3fsLog::GetS3fsLogNest(nest), file, func, line, message);
|
||||
fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s%s:%s(%d): %s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(level), S3fsLog::GetS3fsLogNest(nest), file, func, line, message.get());
|
||||
S3fsLog::Flush();
|
||||
}else{
|
||||
syslog(S3fsLog::GetSyslogLevel(level), "%s%s%s", instance_name.c_str(), S3fsLog::GetS3fsLogNest(nest), message);
|
||||
syslog(S3fsLog::GetSyslogLevel(level), "%s%s%s", instance_name.c_str(), S3fsLog::GetS3fsLogNest(nest), message.get());
|
||||
}
|
||||
|
||||
delete[] message;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -23,9 +23,12 @@
|
||||
|
||||
#include <cstdarg>
|
||||
#include <cstdio>
|
||||
#include <string>
|
||||
#include <syslog.h>
|
||||
#include <sys/time.h>
|
||||
|
||||
#include "common.h"
|
||||
|
||||
#ifdef CLOCK_MONOTONIC_COARSE
|
||||
#define S3FS_CLOCK_MONOTONIC CLOCK_MONOTONIC_COARSE
|
||||
#else
|
||||
@ -48,15 +51,15 @@ class S3fsLog
|
||||
};
|
||||
|
||||
protected:
|
||||
static const int NEST_MAX = 4;
|
||||
static const char* const nest_spaces[NEST_MAX];
|
||||
static const char LOGFILEENV[];
|
||||
static const char MSGTIMESTAMP[];
|
||||
static constexpr int NEST_MAX = 4;
|
||||
static constexpr const char* nest_spaces[NEST_MAX] = {"", " ", " ", " "};
|
||||
static constexpr char LOGFILEENV[] = "S3FS_LOGFILE";
|
||||
static constexpr char MSGTIMESTAMP[] = "S3FS_MSGTIMESTAMP";
|
||||
|
||||
static S3fsLog* pSingleton;
|
||||
static s3fs_log_level debug_level;
|
||||
static FILE* logfp;
|
||||
static std::string* plogfile;
|
||||
static std::string logfile;
|
||||
static bool time_stamp;
|
||||
|
||||
protected:
|
||||
@ -73,7 +76,7 @@ class S3fsLog
|
||||
static bool IsS3fsLogInfo() { return IsS3fsLogLevel(LEVEL_INFO); }
|
||||
static bool IsS3fsLogDbg() { return IsS3fsLogLevel(LEVEL_DBG); }
|
||||
|
||||
static int GetSyslogLevel(s3fs_log_level level)
|
||||
static constexpr int GetSyslogLevel(s3fs_log_level level)
|
||||
{
|
||||
return ( LEVEL_DBG == (level & LEVEL_DBG) ? LOG_DEBUG :
|
||||
LEVEL_INFO == (level & LEVEL_DBG) ? LOG_INFO :
|
||||
@ -83,7 +86,7 @@ class S3fsLog
|
||||
|
||||
static std::string GetCurrentTime();
|
||||
|
||||
static const char* GetLevelString(s3fs_log_level level)
|
||||
static constexpr const char* GetLevelString(s3fs_log_level level)
|
||||
{
|
||||
return ( LEVEL_DBG == (level & LEVEL_DBG) ? "[DBG] " :
|
||||
LEVEL_INFO == (level & LEVEL_DBG) ? "[INF] " :
|
||||
@ -91,18 +94,14 @@ class S3fsLog
|
||||
LEVEL_ERR == (level & LEVEL_DBG) ? "[ERR] " : "[CRT] " );
|
||||
}
|
||||
|
||||
static const char* GetS3fsLogNest(int nest)
|
||||
static constexpr const char* GetS3fsLogNest(int nest)
|
||||
{
|
||||
if(nest < NEST_MAX){
|
||||
return nest_spaces[nest];
|
||||
}else{
|
||||
return nest_spaces[NEST_MAX - 1];
|
||||
}
|
||||
return nest_spaces[nest < NEST_MAX ? nest : NEST_MAX - 1];
|
||||
}
|
||||
|
||||
static bool IsSetLogFile()
|
||||
{
|
||||
return (NULL != logfp);
|
||||
return (nullptr != logfp);
|
||||
}
|
||||
|
||||
static FILE* GetOutputLogFile()
|
||||
@ -137,6 +136,10 @@ class S3fsLog
|
||||
|
||||
explicit S3fsLog();
|
||||
~S3fsLog();
|
||||
S3fsLog(const S3fsLog&) = delete;
|
||||
S3fsLog(S3fsLog&&) = delete;
|
||||
S3fsLog& operator=(const S3fsLog&) = delete;
|
||||
S3fsLog& operator=(S3fsLog&&) = delete;
|
||||
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -227,6 +230,34 @@ void s3fs_low_logprn2(S3fsLog::s3fs_log_level level, int nest, const char* file,
|
||||
#define S3FS_PRN_CURL(fmt, ...) S3FS_LOW_CURLDBG(fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_CACHE(fp, ...) S3FS_LOW_CACHE(fp, ##__VA_ARGS__, "")
|
||||
|
||||
// Macros to print log with fuse context
|
||||
#define PRINT_FUSE_CTX(level, indent, fmt, ...) do { \
|
||||
if(S3fsLog::IsS3fsLogLevel(level)){ \
|
||||
struct fuse_context *ctx = fuse_get_context(); \
|
||||
if(ctx == NULL){ \
|
||||
S3FS_LOW_LOGPRN2(level, indent, fmt, ##__VA_ARGS__); \
|
||||
}else{ \
|
||||
S3FS_LOW_LOGPRN2(level, indent, fmt"[pid=%u,uid=%u,gid=%u]",\
|
||||
##__VA_ARGS__, \
|
||||
(unsigned int)(ctx->pid), \
|
||||
(unsigned int)(ctx->uid), \
|
||||
(unsigned int)(ctx->gid)); \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define FUSE_CTX_INFO(fmt, ...) do { \
|
||||
PRINT_FUSE_CTX(S3fsLog::LEVEL_INFO, 0, fmt, ##__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define FUSE_CTX_INFO1(fmt, ...) do { \
|
||||
PRINT_FUSE_CTX(S3fsLog::LEVEL_INFO, 1, fmt, ##__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define FUSE_CTX_DBG(fmt, ...) do { \
|
||||
PRINT_FUSE_CTX(S3fsLog::LEVEL_DBG, 0, fmt, ##__VA_ARGS__); \
|
||||
} while (0)
|
||||
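As a usage note (an assumed call site, not part of this diff), a FUSE handler would typically log its entry with one of these macros, which append the caller's pid/uid/gid when a fuse context is available:

```cpp
// Hypothetical call site inside a FUSE handler; 'path' is the handler's
// path argument. Prints the message plus "[pid=..,uid=..,gid=..]" taken
// from fuse_get_context(), or just the message when no context exists.
FUSE_CTX_INFO("[path=%s]", path);
```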
|
||||
#endif // S3FS_LOGGER_H_
|
||||
|
||||
/*
|
||||
|
||||
@ -23,20 +23,21 @@
|
||||
#include <unistd.h>
|
||||
#include <cerrno>
|
||||
#include <grp.h>
|
||||
#include <memory>
|
||||
#include <pwd.h>
|
||||
#include <libgen.h>
|
||||
#include <dirent.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/utsname.h>
|
||||
|
||||
#include <string>
|
||||
#include <sstream>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_logger.h"
|
||||
#include "s3fs_util.h"
|
||||
#include "string_util.h"
|
||||
#include "s3fs_help.h"
|
||||
#include "autolock.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
@ -94,32 +95,27 @@ std::string get_username(uid_t uid)
|
||||
{
|
||||
size_t maxlen = max_password_size;
|
||||
int result;
|
||||
char* pbuf;
|
||||
struct passwd pwinfo;
|
||||
struct passwd* ppwinfo = NULL;
|
||||
struct passwd* ppwinfo = nullptr;
|
||||
|
||||
// make buffer
|
||||
pbuf = new char[maxlen];
|
||||
std::unique_ptr<char[]> pbuf(new char[maxlen]);
|
||||
// get pw information
|
||||
while(ERANGE == (result = getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo))){
|
||||
delete[] pbuf;
|
||||
while(ERANGE == (result = getpwuid_r(uid, &pwinfo, pbuf.get(), maxlen, &ppwinfo))){
|
||||
maxlen *= 2;
|
||||
pbuf = new char[maxlen];
|
||||
pbuf.reset(new char[maxlen]);
|
||||
}
|
||||
|
||||
if(0 != result){
|
||||
S3FS_PRN_ERR("could not get pw information(%d).", result);
|
||||
delete[] pbuf;
|
||||
return std::string("");
|
||||
return "";
|
||||
}
|
||||
|
||||
// check pw
|
||||
if(NULL == ppwinfo){
|
||||
delete[] pbuf;
|
||||
return std::string("");
|
||||
if(nullptr == ppwinfo){
|
||||
return "";
|
||||
}
|
||||
std::string name = SAFESTRPTR(ppwinfo->pw_name);
|
||||
delete[] pbuf;
|
||||
return name;
|
||||
}
|
||||
|
||||
@ -127,29 +123,25 @@ int is_uid_include_group(uid_t uid, gid_t gid)
|
||||
{
|
||||
size_t maxlen = max_group_name_length;
|
||||
int result;
|
||||
char* pbuf;
|
||||
struct group ginfo;
|
||||
struct group* pginfo = NULL;
|
||||
struct group* pginfo = nullptr;
|
||||
|
||||
// make buffer
|
||||
pbuf = new char[maxlen];
|
||||
std::unique_ptr<char[]> pbuf(new char[maxlen]);
|
||||
// get group information
|
||||
while(ERANGE == (result = getgrgid_r(gid, &ginfo, pbuf, maxlen, &pginfo))){
|
||||
delete[] pbuf;
|
||||
while(ERANGE == (result = getgrgid_r(gid, &ginfo, pbuf.get(), maxlen, &pginfo))){
|
||||
maxlen *= 2;
|
||||
pbuf = new char[maxlen];
|
||||
pbuf.reset(new char[maxlen]);
|
||||
}
|
||||
|
||||
if(0 != result){
|
||||
S3FS_PRN_ERR("could not get group information(%d).", result);
|
||||
delete[] pbuf;
|
||||
return -result;
|
||||
}
|
||||
|
||||
// check group
|
||||
if(NULL == pginfo){
|
||||
if(nullptr == pginfo){
|
||||
// there is not gid in group.
|
||||
delete[] pbuf;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -159,20 +151,70 @@ int is_uid_include_group(uid_t uid, gid_t gid)
|
||||
for(ppgr_mem = pginfo->gr_mem; ppgr_mem && *ppgr_mem; ppgr_mem++){
|
||||
if(username == *ppgr_mem){
|
||||
// Found username in group.
|
||||
delete[] pbuf;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
delete[] pbuf;
|
||||
return 0;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility for file and directory
|
||||
//-------------------------------------------------------------------
|
||||
// [NOTE]
|
||||
// basename/dirname return a pointer to static storage.
|
||||
// Normally this shouldn't be a problem, but on macOS 10 we found a case
|
||||
// where dirname's return value was corrupted by concurrent calls from
|
||||
// other threads.
|
||||
// To avoid this, access is serialized with a mutex.
|
||||
//
|
||||
static pthread_mutex_t* pbasename_lock = nullptr;
|
||||
|
||||
bool init_basename_lock()
|
||||
{
|
||||
if(pbasename_lock){
|
||||
S3FS_PRN_ERR("already initialized mutex for posix dirname/basename function.");
|
||||
return false;
|
||||
}
|
||||
pbasename_lock = new pthread_mutex_t;
|
||||
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
|
||||
#if S3FS_PTHREAD_ERRORCHECK
|
||||
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
|
||||
#endif
|
||||
int result;
|
||||
if(0 != (result = pthread_mutex_init(pbasename_lock, &attr))){
|
||||
S3FS_PRN_ERR("failed to init pbasename_lock: %d.", result);
|
||||
delete pbasename_lock;
|
||||
pbasename_lock = nullptr;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool destroy_basename_lock()
|
||||
{
|
||||
if(!pbasename_lock){
|
||||
S3FS_PRN_ERR("the mutex for posix dirname/basename function is not initialized.");
|
||||
return false;
|
||||
}
|
||||
int result;
|
||||
if(0 != (result = pthread_mutex_destroy(pbasename_lock))){
|
||||
S3FS_PRN_ERR("failed to destroy pbasename_lock: %d", result);
|
||||
return false;
|
||||
}
|
||||
delete pbasename_lock;
|
||||
pbasename_lock = nullptr;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
std::string mydirname(const std::string& path)
|
||||
{
|
||||
return std::string(dirname((char*)path.c_str()));
|
||||
AutoLock auto_lock(pbasename_lock);
|
||||
|
||||
return mydirname(path.c_str());
|
||||
}
|
||||
|
||||
// safe variant of dirname
|
||||
@ -180,14 +222,20 @@ std::string mydirname(const std::string& path)
|
||||
std::string mydirname(const char* path)
|
||||
{
|
||||
if(!path || '\0' == path[0]){
|
||||
return std::string("");
|
||||
return "";
|
||||
}
|
||||
return mydirname(std::string(path));
|
||||
|
||||
char *buf = strdup(path);
|
||||
std::string result = dirname(buf);
|
||||
free(buf);
|
||||
return result;
|
||||
}
|
||||
|
||||
std::string mybasename(const std::string& path)
|
||||
{
|
||||
return std::string(basename((char*)path.c_str()));
|
||||
AutoLock auto_lock(pbasename_lock);
|
||||
|
||||
return mybasename(path.c_str());
|
||||
}
|
||||
|
||||
// safe variant of basename
|
||||
@ -195,9 +243,13 @@ std::string mybasename(const std::string& path)
|
||||
std::string mybasename(const char* path)
|
||||
{
|
||||
if(!path || '\0' == path[0]){
|
||||
return std::string("");
|
||||
return "";
|
||||
}
|
||||
return mybasename(std::string(path));
|
||||
|
||||
char *buf = strdup(path);
|
||||
std::string result = basename(buf);
|
||||
free(buf);
|
||||
return result;
|
||||
}
|
||||
|
||||
// mkdir --parents
|
||||
@ -297,7 +349,7 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own)
|
||||
DIR* dp;
|
||||
struct dirent* dent;
|
||||
|
||||
if(NULL == (dp = opendir(dir))){
|
||||
if(nullptr == (dp = opendir(dir))){
|
||||
S3FS_PRN_ERR("could not open dir(%s) - errno(%d)", dir, errno);
|
||||
return false;
|
||||
}
|
||||
@ -348,7 +400,7 @@ bool compare_sysname(const char* target)
|
||||
// The buffer size of sysname member in struct utsname is
|
||||
// OS dependent, but 512 bytes is sufficient for now.
|
||||
//
|
||||
static char* psysname = NULL;
|
||||
static const char* psysname = nullptr;
|
||||
static char sysname[512];
|
||||
if(!psysname){
|
||||
struct utsname sysinfo;
|
||||
@ -392,6 +444,132 @@ void print_launch_message(int argc, char** argv)
|
||||
S3FS_PRN_LAUNCH_INFO("%s", message.c_str());
|
||||
}
|
||||
|
||||
//
|
||||
// result: -1 ts1 < ts2
|
||||
// 0 ts1 == ts2
|
||||
// 1 ts1 > ts2
|
||||
//
|
||||
int compare_timespec(const struct timespec& ts1, const struct timespec& ts2)
|
||||
{
|
||||
if(ts1.tv_sec < ts2.tv_sec){
|
||||
return -1;
|
||||
}else if(ts1.tv_sec > ts2.tv_sec){
|
||||
return 1;
|
||||
}else{
|
||||
if(ts1.tv_nsec < ts2.tv_nsec){
|
||||
return -1;
|
||||
}else if(ts1.tv_nsec > ts2.tv_nsec){
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
//
|
||||
// result: -1 st < ts
|
||||
// 0 st == ts
|
||||
// 1 st > ts
|
||||
//
|
||||
int compare_timespec(const struct stat& st, stat_time_type type, const struct timespec& ts)
|
||||
{
|
||||
struct timespec st_ts;
|
||||
set_stat_to_timespec(st, type, st_ts);
|
||||
|
||||
return compare_timespec(st_ts, ts);
|
||||
}
|
||||
|
||||
void set_timespec_to_stat(struct stat& st, stat_time_type type, const struct timespec& ts)
|
||||
{
|
||||
if(stat_time_type::ATIME == type){
|
||||
#if defined(__APPLE__)
|
||||
st.st_atime = ts.tv_sec;
|
||||
st.st_atimespec.tv_nsec = ts.tv_nsec;
|
||||
#else
|
||||
st.st_atim.tv_sec = ts.tv_sec;
|
||||
st.st_atim.tv_nsec = ts.tv_nsec;
|
||||
#endif
|
||||
}else if(stat_time_type::MTIME == type){
|
||||
#if defined(__APPLE__)
|
||||
st.st_mtime = ts.tv_sec;
|
||||
st.st_mtimespec.tv_nsec = ts.tv_nsec;
|
||||
#else
|
||||
st.st_mtim.tv_sec = ts.tv_sec;
|
||||
st.st_mtim.tv_nsec = ts.tv_nsec;
|
||||
#endif
|
||||
}else if(stat_time_type::CTIME == type){
|
||||
#if defined(__APPLE__)
|
||||
st.st_ctime = ts.tv_sec;
|
||||
st.st_ctimespec.tv_nsec = ts.tv_nsec;
|
||||
#else
|
||||
st.st_ctim.tv_sec = ts.tv_sec;
|
||||
st.st_ctim.tv_nsec = ts.tv_nsec;
|
||||
#endif
|
||||
}else{
|
||||
S3FS_PRN_ERR("unknown type(%d), so skip to set value.", static_cast<int>(type));
|
||||
}
|
||||
}
|
||||
|
||||
struct timespec* set_stat_to_timespec(const struct stat& st, stat_time_type type, struct timespec& ts)
|
||||
{
|
||||
if(stat_time_type::ATIME == type){
|
||||
#if defined(__APPLE__)
|
||||
ts.tv_sec = st.st_atime;
|
||||
ts.tv_nsec = st.st_atimespec.tv_nsec;
|
||||
#else
|
||||
ts = st.st_atim;
|
||||
#endif
|
||||
}else if(stat_time_type::MTIME == type){
|
||||
#if defined(__APPLE__)
|
||||
ts.tv_sec = st.st_mtime;
|
||||
ts.tv_nsec = st.st_mtimespec.tv_nsec;
|
||||
#else
|
||||
ts = st.st_mtim;
|
||||
#endif
|
||||
}else if(stat_time_type::CTIME == type){
|
||||
#if defined(__APPLE__)
|
||||
ts.tv_sec = st.st_ctime;
|
||||
ts.tv_nsec = st.st_ctimespec.tv_nsec;
|
||||
#else
|
||||
ts = st.st_ctim;
|
||||
#endif
|
||||
}else{
|
||||
S3FS_PRN_ERR("unknown type(%d), so use 0 as timespec.", static_cast<int>(type));
|
||||
ts.tv_sec = 0;
|
||||
ts.tv_nsec = 0;
|
||||
}
|
||||
return &ts;
|
||||
}
|
||||
|
||||
std::string str_stat_time(const struct stat& st, stat_time_type type)
|
||||
{
|
||||
struct timespec ts;
|
||||
return str(*set_stat_to_timespec(st, type, ts));
|
||||
}
|
||||
|
||||
struct timespec* s3fs_realtime(struct timespec& ts)
|
||||
{
|
||||
if(-1 == clock_gettime(static_cast<clockid_t>(CLOCK_REALTIME), &ts)){
|
||||
S3FS_PRN_WARN("failed to clock_gettime by errno(%d)", errno);
|
||||
ts.tv_sec = time(nullptr);
|
||||
ts.tv_nsec = 0;
|
||||
}
|
||||
return &ts;
|
||||
}
|
||||
|
||||
std::string s3fs_str_realtime()
|
||||
{
|
||||
struct timespec ts;
|
||||
return str(*s3fs_realtime(ts));
|
||||
}
|
||||
|
||||
int s3fs_fclose(FILE* fp)
|
||||
{
|
||||
if(fp == nullptr){
|
||||
return 0;
|
||||
}
|
||||
return fclose(fp);
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
|
||||
@ -21,6 +21,8 @@
|
||||
#ifndef S3FS_S3FS_UTIL_H_
|
||||
#define S3FS_S3FS_UTIL_H_
|
||||
|
||||
#include <functional>
|
||||
|
||||
#ifndef CLOCK_REALTIME
|
||||
#define CLOCK_REALTIME 0
|
||||
#endif
|
||||
@ -40,6 +42,8 @@ void init_sysconf_vars();
|
||||
std::string get_username(uid_t uid);
|
||||
int is_uid_include_group(uid_t uid, gid_t gid);
|
||||
|
||||
bool init_basename_lock();
|
||||
bool destroy_basename_lock();
|
||||
std::string mydirname(const char* path);
|
||||
std::string mydirname(const std::string& path);
|
||||
std::string mybasename(const char* path);
|
||||
@ -54,6 +58,57 @@ bool compare_sysname(const char* target);
|
||||
|
||||
void print_launch_message(int argc, char** argv);
|
||||
|
||||
//
|
||||
// Utility for nanosecond time(timespec)
|
||||
//
|
||||
enum class stat_time_type{
|
||||
ATIME,
|
||||
MTIME,
|
||||
CTIME
|
||||
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility for nanosecond time(timespec)
|
||||
//-------------------------------------------------------------------
|
||||
static constexpr struct timespec S3FS_OMIT_TS = {0, UTIME_OMIT};
|
||||
|
||||
int compare_timespec(const struct timespec& ts1, const struct timespec& ts2);
|
||||
int compare_timespec(const struct stat& st, stat_time_type type, const struct timespec& ts);
|
||||
void set_timespec_to_stat(struct stat& st, stat_time_type type, const struct timespec& ts);
|
||||
struct timespec* set_stat_to_timespec(const struct stat& st, stat_time_type type, struct timespec& ts);
|
||||
std::string str_stat_time(const struct stat& st, stat_time_type type);
|
||||
struct timespec* s3fs_realtime(struct timespec& ts);
|
||||
std::string s3fs_str_realtime();
|
||||
|
||||
// Wrap fclose since it is illegal to take the address of a stdlib function
|
||||
int s3fs_fclose(FILE* fp);
|
||||
|
||||
class scope_guard {
|
||||
public:
|
||||
template<class Callable>
|
||||
explicit scope_guard(Callable&& undo_func)
|
||||
: func(std::forward<Callable>(undo_func))
|
||||
{}
|
||||
|
||||
~scope_guard() {
|
||||
if(func != nullptr) {
|
||||
func();
|
||||
}
|
||||
}
|
||||
|
||||
void dismiss() {
|
||||
func = nullptr;
|
||||
}
|
||||
|
||||
scope_guard(const scope_guard&) = delete;
|
||||
scope_guard(scope_guard&& other) = delete;
|
||||
scope_guard& operator=(const scope_guard&) = delete;
|
||||
scope_guard& operator=(scope_guard&&) = delete;
|
||||
|
||||
private:
|
||||
std::function<void()> func;
|
||||
};
|
||||
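A minimal usage sketch of the scope_guard above (a hypothetical example, not part of the changeset): the callable runs on every exit path unless dismiss() is called first.

```cpp
#include <cstdio>

// Illustrative helper, not s3fs code: read one byte with guaranteed cleanup.
static bool read_first_byte(const char* path, int& out)
{
    FILE* fp = fopen(path, "rb");
    if(fp == nullptr){
        return false;
    }
    scope_guard guard([fp]() { fclose(fp); });   // fclose runs on every return below

    int c = fgetc(fp);
    if(EOF == c){
        return false;                            // guard closes fp here
    }
    out = c;
    return true;                                 // and here as well
}
```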
|
||||
#endif // S3FS_S3FS_UTIL_H_
|
||||
|
||||
/*
|
||||
|
||||
src/s3fs_xml.cpp
@ -20,22 +20,26 @@
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <libxml/xpathInternals.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_logger.h"
|
||||
#include "s3fs_xml.h"
|
||||
#include "s3fs_util.h"
|
||||
#include "s3objlist.h"
|
||||
#include "autolock.h"
|
||||
#include "string_util.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Variables
|
||||
//-------------------------------------------------------------------
|
||||
static const char c_strErrorObjectName[] = "FILE or SUBDIR in DIR";
|
||||
static constexpr char c_strErrorObjectName[] = "FILE or SUBDIR in DIR";
|
||||
|
||||
// [NOTE]
|
||||
// mutex for static variables in GetXmlNsUrl
|
||||
//
|
||||
static pthread_mutex_t* pxml_parser_mutex = NULL;
|
||||
static pthread_mutex_t* pxml_parser_mutex = nullptr;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
@ -55,9 +59,9 @@ static bool GetXmlNsUrl(xmlDocPtr doc, std::string& nsurl)
|
||||
|
||||
AutoLock lock(pxml_parser_mutex);
|
||||
|
||||
if((tmLast + 60) < time(NULL)){
|
||||
if((tmLast + 60) < time(nullptr)){
|
||||
// refresh
|
||||
tmLast = time(NULL);
|
||||
tmLast = time(nullptr);
|
||||
strNs = "";
|
||||
xmlNodePtr pRootNode = xmlDocGetRootElement(doc);
|
||||
if(pRootNode){
|
||||
@ -66,7 +70,7 @@ static bool GetXmlNsUrl(xmlDocPtr doc, std::string& nsurl)
|
||||
if(nslist[0] && nslist[0]->href){
|
||||
int len = xmlStrlen(nslist[0]->href);
|
||||
if(0 < len){
|
||||
strNs = std::string((const char*)(nslist[0]->href), len);
|
||||
strNs = std::string(reinterpret_cast<const char*>(nslist[0]->href), len);
|
||||
}
|
||||
}
|
||||
S3FS_XMLFREE(nslist);
|
||||
@ -82,19 +86,18 @@ static bool GetXmlNsUrl(xmlDocPtr doc, std::string& nsurl)
|
||||
return result;
|
||||
}
|
||||
|
||||
static xmlChar* get_base_exp(xmlDocPtr doc, const char* exp)
|
||||
static unique_ptr_xmlChar get_base_exp(xmlDocPtr doc, const char* exp)
|
||||
{
|
||||
xmlXPathObjectPtr marker_xp;
|
||||
std::string xmlnsurl;
|
||||
std::string exp_string;
|
||||
|
||||
if(!doc){
|
||||
return NULL;
|
||||
return {nullptr, xmlFree};
|
||||
}
|
||||
xmlXPathContextPtr ctx = xmlXPathNewContext(doc);
|
||||
unique_ptr_xmlXPathContext ctx(xmlXPathNewContext(doc), xmlXPathFreeContext);
|
||||
|
||||
if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
|
||||
xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str());
|
||||
xmlXPathRegisterNs(ctx.get(), reinterpret_cast<const xmlChar*>("s3"), reinterpret_cast<const xmlChar*>(xmlnsurl.c_str()));
|
||||
exp_string = "/s3:ListBucketResult/s3:";
|
||||
} else {
|
||||
exp_string = "/ListBucketResult/";
|
||||
@ -102,82 +105,75 @@ static xmlChar* get_base_exp(xmlDocPtr doc, const char* exp)
|
||||
|
||||
exp_string += exp;
|
||||
|
||||
if(NULL == (marker_xp = xmlXPathEvalExpression((xmlChar *)exp_string.c_str(), ctx))){
|
||||
xmlXPathFreeContext(ctx);
|
||||
return NULL;
|
||||
unique_ptr_xmlXPathObject marker_xp(xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(exp_string.c_str()), ctx.get()), xmlXPathFreeObject);
|
||||
if(nullptr == marker_xp){
|
||||
return {nullptr, xmlFree};
|
||||
}
|
||||
if(xmlXPathNodeSetIsEmpty(marker_xp->nodesetval)){
|
||||
S3FS_PRN_ERR("marker_xp->nodesetval is empty.");
|
||||
xmlXPathFreeObject(marker_xp);
|
||||
xmlXPathFreeContext(ctx);
|
||||
return NULL;
|
||||
S3FS_PRN_INFO("marker_xp->nodesetval is empty.");
|
||||
return {nullptr, xmlFree};
|
||||
}
|
||||
xmlNodeSetPtr nodes = marker_xp->nodesetval;
|
||||
xmlChar* result = xmlNodeListGetString(doc, nodes->nodeTab[0]->xmlChildrenNode, 1);
|
||||
|
||||
xmlXPathFreeObject(marker_xp);
|
||||
xmlXPathFreeContext(ctx);
|
||||
|
||||
unique_ptr_xmlChar result(xmlNodeListGetString(doc, nodes->nodeTab[0]->xmlChildrenNode, 1), xmlFree);
|
||||
return result;
|
||||
}
|
||||
|
||||
static xmlChar* get_prefix(xmlDocPtr doc)
|
||||
static unique_ptr_xmlChar get_prefix(xmlDocPtr doc)
|
||||
{
|
||||
return get_base_exp(doc, "Prefix");
|
||||
}
|
||||
|
||||
xmlChar* get_next_continuation_token(xmlDocPtr doc)
|
||||
unique_ptr_xmlChar get_next_continuation_token(xmlDocPtr doc)
|
||||
{
|
||||
return get_base_exp(doc, "NextContinuationToken");
|
||||
}
|
||||
|
||||
xmlChar* get_next_marker(xmlDocPtr doc)
|
||||
unique_ptr_xmlChar get_next_marker(xmlDocPtr doc)
|
||||
{
|
||||
return get_base_exp(doc, "NextMarker");
|
||||
}
|
||||
|
||||
// return: the pointer to object name on allocated memory.
|
||||
// the pointer to "c_strErrorObjectName".(not allocated)
|
||||
// NULL(a case of something error occurred)
|
||||
// nullptr (when an error occurred)
|
||||
static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path)
|
||||
{
|
||||
// Get full path
|
||||
xmlChar* fullpath = xmlNodeListGetString(doc, node, 1);
|
||||
unique_ptr_xmlChar fullpath(xmlNodeListGetString(doc, node, 1), xmlFree);
|
||||
if(!fullpath){
|
||||
S3FS_PRN_ERR("could not get object full path name..");
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
// basepath(path) is the same as fullpath.
|
||||
if(0 == strcmp((char*)fullpath, path)){
|
||||
xmlFree(fullpath);
|
||||
return (char*)c_strErrorObjectName;
|
||||
if(0 == strcmp(reinterpret_cast<char*>(fullpath.get()), path)){
|
||||
return const_cast<char*>(c_strErrorObjectName);
|
||||
}
|
||||
|
||||
// Make dir path and filename
|
||||
std::string strdirpath = mydirname(std::string((char*)fullpath));
|
||||
std::string strmybpath = mybasename(std::string((char*)fullpath));
|
||||
std::string strdirpath = mydirname(reinterpret_cast<const char*>(fullpath.get()));
|
||||
std::string strmybpath = mybasename(reinterpret_cast<const char*>(fullpath.get()));
|
||||
const char* dirpath = strdirpath.c_str();
|
||||
const char* mybname = strmybpath.c_str();
|
||||
const char* basepath= (path && '/' == path[0]) ? &path[1] : path;
|
||||
xmlFree(fullpath);
|
||||
|
||||
if('\0' == mybname[0]){
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// check subdir & file in subdir
|
||||
if(0 < strlen(dirpath)){
|
||||
// case of "/"
|
||||
if(0 == strcmp(mybname, "/") && 0 == strcmp(dirpath, "/")){
|
||||
return (char*)c_strErrorObjectName;
|
||||
return const_cast<char*>(c_strErrorObjectName);
|
||||
}
|
||||
// case of "."
|
||||
if(0 == strcmp(mybname, ".") && 0 == strcmp(dirpath, ".")){
|
||||
return (char*)c_strErrorObjectName;
|
||||
return const_cast<char *>(c_strErrorObjectName);
|
||||
}
|
||||
// case of ".."
|
||||
if(0 == strcmp(mybname, "..") && 0 == strcmp(dirpath, ".")){
|
||||
return (char*)c_strErrorObjectName;
|
||||
return const_cast<char *>(c_strErrorObjectName);
|
||||
}
|
||||
// case of "name"
|
||||
if(0 == strcmp(dirpath, ".")){
|
||||
@ -203,38 +199,35 @@ static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path)
|
||||
}
|
||||
}
|
||||
// case of something wrong
|
||||
return (char*)c_strErrorObjectName;
|
||||
return const_cast<char*>(c_strErrorObjectName);
|
||||
}
|
||||
|
||||
static xmlChar* get_exp_value_xml(xmlDocPtr doc, xmlXPathContextPtr ctx, const char* exp_key)
|
||||
static unique_ptr_xmlChar get_exp_value_xml(xmlDocPtr doc, xmlXPathContextPtr ctx, const char* exp_key)
|
||||
{
|
||||
if(!doc || !ctx || !exp_key){
|
||||
return NULL;
|
||||
return {nullptr, xmlFree};
|
||||
}
|
||||
|
||||
xmlXPathObjectPtr exp;
|
||||
xmlNodeSetPtr exp_nodes;
|
||||
xmlChar* exp_value;
|
||||
|
||||
// search exp_key tag
|
||||
if(NULL == (exp = xmlXPathEvalExpression((xmlChar*)exp_key, ctx))){
|
||||
unique_ptr_xmlXPathObject exp(xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(exp_key), ctx), xmlXPathFreeObject);
|
||||
if(nullptr == exp){
|
||||
S3FS_PRN_ERR("Could not find key(%s).", exp_key);
|
||||
return NULL;
|
||||
return {nullptr, xmlFree};
|
||||
}
|
||||
if(xmlXPathNodeSetIsEmpty(exp->nodesetval)){
|
||||
S3FS_PRN_ERR("Key(%s) node is empty.", exp_key);
|
||||
S3FS_XMLXPATHFREEOBJECT(exp);
|
||||
return NULL;
|
||||
return {nullptr, xmlFree};
|
||||
}
|
||||
// get exp_key value & set in struct
|
||||
exp_nodes = exp->nodesetval;
|
||||
if(NULL == (exp_value = xmlNodeListGetString(doc, exp_nodes->nodeTab[0]->xmlChildrenNode, 1))){
|
||||
unique_ptr_xmlChar exp_value(xmlNodeListGetString(doc, exp_nodes->nodeTab[0]->xmlChildrenNode, 1), xmlFree);
|
||||
if(nullptr == exp_value){
|
||||
S3FS_PRN_ERR("Key(%s) value is empty.", exp_key);
|
||||
S3FS_XMLXPATHFREEOBJECT(exp);
|
||||
return NULL;
|
||||
return {nullptr, xmlFree};
|
||||
}
|
||||
|
||||
S3FS_XMLXPATHFREEOBJECT(exp);
|
||||
return exp_value;
|
||||
}
|
||||
|
||||
@ -244,7 +237,7 @@ bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list)
|
||||
return false;
|
||||
}
|
||||
|
||||
xmlXPathContextPtr ctx = xmlXPathNewContext(doc);;
|
||||
unique_ptr_xmlXPathContext ctx(xmlXPathNewContext(doc), xmlXPathFreeContext);
|
||||
|
||||
std::string xmlnsurl;
|
||||
std::string ex_upload = "//";
|
||||
@ -253,7 +246,7 @@ bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list)
|
||||
std::string ex_date;
|
||||
|
||||
if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
|
||||
xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str());
|
||||
xmlXPathRegisterNs(ctx.get(), reinterpret_cast<const xmlChar*>("s3"), reinterpret_cast<const xmlChar*>(xmlnsurl.c_str()));
|
||||
ex_upload += "s3:";
|
||||
ex_key += "s3:";
|
||||
ex_id += "s3:";
|
||||
@ -265,15 +258,13 @@ bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list)
|
||||
ex_date += "Initiated";
|
||||
|
||||
// get "Upload" Tags
|
||||
xmlXPathObjectPtr upload_xp;
|
||||
if(NULL == (upload_xp = xmlXPathEvalExpression((xmlChar*)ex_upload.c_str(), ctx))){
|
||||
unique_ptr_xmlXPathObject upload_xp(xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(ex_upload.c_str()), ctx.get()), xmlXPathFreeObject);
|
||||
if(nullptr == upload_xp){
|
||||
S3FS_PRN_ERR("xmlXPathEvalExpression returns null.");
|
||||
return false;
|
||||
}
|
||||
if(xmlXPathNodeSetIsEmpty(upload_xp->nodesetval)){
|
||||
S3FS_PRN_INFO("upload_xp->nodesetval is empty.");
|
||||
S3FS_XMLXPATHFREEOBJECT(upload_xp);
|
||||
S3FS_XMLXPATHFREECONTEXT(ctx);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -285,70 +276,57 @@ bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list)
|
||||
ctx->node = upload_nodes->nodeTab[cnt];
|
||||
|
||||
INCOMP_MPU_INFO part;
|
||||
xmlChar* ex_value;
|
||||
|
||||
// search "Key" tag
|
||||
if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_key.c_str()))){
|
||||
unique_ptr_xmlChar ex_value(get_exp_value_xml(doc, ctx.get(), ex_key.c_str()));
|
||||
if(nullptr == ex_value){
|
||||
continue;
|
||||
}
|
||||
if('/' != *((char*)ex_value)){
|
||||
if('/' != *(reinterpret_cast<char*>(ex_value.get()))){
|
||||
part.key = "/";
|
||||
}else{
|
||||
part.key = "";
|
||||
}
|
||||
part.key += (char*)ex_value;
|
||||
S3FS_XMLFREE(ex_value);
|
||||
part.key += reinterpret_cast<char*>(ex_value.get());
|
||||
|
||||
// search "UploadId" tag
|
||||
if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_id.c_str()))){
|
||||
if(nullptr == (ex_value = get_exp_value_xml(doc, ctx.get(), ex_id.c_str()))){
|
||||
continue;
|
||||
}
|
||||
part.id = (char*)ex_value;
|
||||
S3FS_XMLFREE(ex_value);
|
||||
part.id = reinterpret_cast<char*>(ex_value.get());
|
||||
|
||||
// search "Initiated" tag
|
||||
if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_date.c_str()))){
|
||||
if(nullptr == (ex_value = get_exp_value_xml(doc, ctx.get(), ex_date.c_str()))){
|
||||
continue;
|
||||
}
|
||||
part.date = (char*)ex_value;
|
||||
S3FS_XMLFREE(ex_value);
|
||||
part.date = reinterpret_cast<char*>(ex_value.get());
|
||||
|
||||
list.push_back(part);
|
||||
}
|
||||
|
||||
S3FS_XMLXPATHFREEOBJECT(upload_xp);
|
||||
S3FS_XMLXPATHFREECONTEXT(ctx);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool is_truncated(xmlDocPtr doc)
|
||||
{
|
||||
bool result = false;
|
||||
|
||||
xmlChar* strTruncate = get_base_exp(doc, "IsTruncated");
|
||||
unique_ptr_xmlChar strTruncate(get_base_exp(doc, "IsTruncated"));
|
||||
if(!strTruncate){
|
||||
return false;
|
||||
}
|
||||
if(0 == strcasecmp((const char*)strTruncate, "true")){
|
||||
result = true;
|
||||
}
|
||||
xmlFree(strTruncate);
|
||||
return result;
|
||||
return 0 == strcasecmp(reinterpret_cast<const char*>(strTruncate.get()), "true");
|
||||
}
|
||||
|
||||
int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head)
|
||||
int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head, bool prefix)
|
||||
{
|
||||
xmlXPathObjectPtr contents_xp;
|
||||
xmlNodeSetPtr content_nodes;
|
||||
|
||||
if(NULL == (contents_xp = xmlXPathEvalExpression((xmlChar*)ex_contents, ctx))){
|
||||
unique_ptr_xmlXPathObject contents_xp(xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(ex_contents), ctx), xmlXPathFreeObject);
|
||||
if(nullptr == contents_xp){
|
||||
S3FS_PRN_ERR("xmlXPathEvalExpression returns null.");
|
||||
return -1;
|
||||
}
|
||||
if(xmlXPathNodeSetIsEmpty(contents_xp->nodesetval)){
|
||||
S3FS_PRN_DBG("contents_xp->nodesetval is empty.");
|
||||
S3FS_XMLXPATHFREEOBJECT(contents_xp);
|
||||
return 0;
|
||||
}
|
||||
content_nodes = contents_xp->nodesetval;
|
||||
@ -360,14 +338,13 @@ int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextP
|
||||
ctx->node = content_nodes->nodeTab[i];
|
||||
|
||||
// object name
|
||||
xmlXPathObjectPtr key;
|
||||
if(NULL == (key = xmlXPathEvalExpression((xmlChar*)ex_key, ctx))){
|
||||
unique_ptr_xmlXPathObject key(xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(ex_key), ctx), xmlXPathFreeObject);
|
||||
if(nullptr == key){
|
||||
S3FS_PRN_WARN("key is null. but continue.");
|
||||
continue;
|
||||
}
|
||||
if(xmlXPathNodeSetIsEmpty(key->nodesetval)){
|
||||
S3FS_PRN_WARN("node is empty. but continue.");
|
||||
xmlXPathFreeObject(key);
|
||||
continue;
|
||||
}
|
||||
xmlNodeSetPtr key_nodes = key->nodesetval;
|
||||
@ -376,42 +353,44 @@ int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextP
|
||||
if(!name){
|
||||
S3FS_PRN_WARN("name is something wrong. but continue.");
|
||||
|
||||
}else if((const char*)name != c_strErrorObjectName){
|
||||
}else if(reinterpret_cast<const char*>(name) != c_strErrorObjectName){
|
||||
is_dir = isCPrefix ? true : false;
|
||||
stretag = "";
|
||||
|
||||
if(!isCPrefix && ex_etag){
|
||||
// Get ETag
|
||||
xmlXPathObjectPtr ETag;
|
||||
if(NULL != (ETag = xmlXPathEvalExpression((xmlChar*)ex_etag, ctx))){
|
||||
unique_ptr_xmlXPathObject ETag(xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(ex_etag), ctx), xmlXPathFreeObject);
|
||||
if(nullptr != ETag){
|
||||
if(xmlXPathNodeSetIsEmpty(ETag->nodesetval)){
|
||||
S3FS_PRN_INFO("ETag->nodesetval is empty.");
|
||||
}else{
|
||||
xmlNodeSetPtr etag_nodes = ETag->nodesetval;
|
||||
xmlChar* petag = xmlNodeListGetString(doc, etag_nodes->nodeTab[0]->xmlChildrenNode, 1);
|
||||
unique_ptr_xmlChar petag(xmlNodeListGetString(doc, etag_nodes->nodeTab[0]->xmlChildrenNode, 1), xmlFree);
|
||||
if(petag){
|
||||
stretag = (char*)petag;
|
||||
xmlFree(petag);
|
||||
stretag = reinterpret_cast<const char*>(petag.get());
|
||||
}
|
||||
}
|
||||
xmlXPathFreeObject(ETag);
|
||||
}
|
||||
}
|
||||
if(!head.insert(name, (!stretag.empty() ? stretag.c_str() : NULL), is_dir)){
|
||||
|
||||
// [NOTE]
|
||||
// The XML data passed to this function is CR code(\r) encoded.
|
||||
// The function below decodes that encoded CR code.
|
||||
//
|
||||
std::string decname = get_decoded_cr_code(name);
|
||||
free(name);
|
||||
|
||||
if(prefix){
|
||||
head.common_prefixes.push_back(decname);
|
||||
}
|
||||
if(!head.insert(decname.c_str(), (!stretag.empty() ? stretag.c_str() : nullptr), is_dir)){
|
||||
S3FS_PRN_ERR("insert_object returns with error.");
|
||||
xmlXPathFreeObject(key);
|
||||
xmlXPathFreeObject(contents_xp);
|
||||
free(name);
|
||||
S3FS_MALLOCTRIM(0);
|
||||
return -1;
|
||||
}
|
||||
free(name);
|
||||
}else{
|
||||
S3FS_PRN_DBG("name is file or subdir in dir. but continue.");
|
||||
}
|
||||
xmlXPathFreeObject(key);
|
||||
}
|
||||
S3FS_XMLXPATHFREEOBJECT(contents_xp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -430,16 +409,13 @@ int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head)
|
||||
}
|
||||
|
||||
// If there is not <Prefix>, use path instead of it.
|
||||
xmlChar* pprefix = get_prefix(doc);
|
||||
std::string prefix = (pprefix ? (char*)pprefix : path ? path : "");
|
||||
if(pprefix){
|
||||
xmlFree(pprefix);
|
||||
}
|
||||
auto pprefix = get_prefix(doc);
|
||||
std::string prefix = (pprefix ? reinterpret_cast<char*>(pprefix.get()) : path ? path : "");
|
||||
|
||||
xmlXPathContextPtr ctx = xmlXPathNewContext(doc);
|
||||
unique_ptr_xmlXPathContext ctx(xmlXPathNewContext(doc), xmlXPathFreeContext);
|
||||
|
||||
if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
|
||||
xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str());
|
||||
xmlXPathRegisterNs(ctx.get(), reinterpret_cast<const xmlChar*>("s3"), reinterpret_cast<const xmlChar*>(xmlnsurl.c_str()));
|
||||
ex_contents+= "s3:";
|
||||
ex_key += "s3:";
|
||||
ex_cprefix += "s3:";
|
||||
@ -452,14 +428,12 @@ int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head)
|
||||
ex_prefix += "Prefix";
|
||||
ex_etag += "ETag";
|
||||
|
||||
if(-1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_contents.c_str(), ex_key.c_str(), ex_etag.c_str(), 0, head) ||
|
||||
-1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_cprefix.c_str(), ex_prefix.c_str(), NULL, 1, head) )
|
||||
if(-1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx.get(), ex_contents.c_str(), ex_key.c_str(), ex_etag.c_str(), 0, head, /*prefix=*/ false) ||
|
||||
-1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx.get(), ex_cprefix.c_str(), ex_prefix.c_str(), nullptr, 1, head, /*prefix=*/ true) )
|
||||
{
|
||||
S3FS_PRN_ERR("append_objects_from_xml_ex returns with error.");
|
||||
S3FS_XMLXPATHFREECONTEXT(ctx);
|
||||
return -1;
|
||||
}
|
||||
S3FS_XMLXPATHFREECONTEXT(ctx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -476,16 +450,15 @@ bool simple_parse_xml(const char* data, size_t len, const char* key, std::string
|
||||
}
|
||||
value.clear();
|
||||
|
||||
xmlDocPtr doc;
|
||||
if(NULL == (doc = xmlReadMemory(data, static_cast<int>(len), "", NULL, 0))){
|
||||
std::unique_ptr<xmlDoc, decltype(&xmlFreeDoc)> doc(xmlReadMemory(data, static_cast<int>(len), "", nullptr, 0), xmlFreeDoc);
|
||||
if(nullptr == doc){
|
||||
return false;
|
||||
}
|
||||
|
||||
if(NULL == doc->children){
|
||||
S3FS_XMLFREEDOC(doc);
|
||||
if(nullptr == doc->children){
|
||||
return false;
|
||||
}
|
||||
for(xmlNodePtr cur_node = doc->children->children; NULL != cur_node; cur_node = cur_node->next){
|
||||
for(xmlNodePtr cur_node = doc->children->children; nullptr != cur_node; cur_node = cur_node->next){
|
||||
// For DEBUG
|
||||
// std::string cur_node_name(reinterpret_cast<const char *>(cur_node->name));
|
||||
// printf("cur_node_name: %s\n", cur_node_name.c_str());
|
||||
@ -506,7 +479,6 @@ bool simple_parse_xml(const char* data, size_t len, const char* key, std::string
|
||||
}
|
||||
}
|
||||
}
|
||||
S3FS_XMLFREEDOC(doc);
|
||||
|
||||
return result;
|
||||
}
|
||||
@ -529,7 +501,7 @@ bool init_parser_xml_lock()
|
||||
|
||||
if(0 != pthread_mutex_init(pxml_parser_mutex, &attr)){
|
||||
delete pxml_parser_mutex;
|
||||
pxml_parser_mutex = NULL;
|
||||
pxml_parser_mutex = nullptr;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
@ -544,7 +516,7 @@ bool destroy_parser_xml_lock()
|
||||
return false;
|
||||
}
|
||||
delete pxml_parser_mutex;
|
||||
pxml_parser_mutex = NULL;
|
||||
pxml_parser_mutex = nullptr;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -22,22 +22,27 @@
|
||||
#define S3FS_S3FS_XML_H_
|
||||
|
||||
#include <libxml/xpath.h>
|
||||
#include <libxml/xpathInternals.h>
|
||||
#include <libxml/tree.h>
|
||||
|
||||
#include <libxml/parser.h> // [NOTE] necessary to include this header in some environments
|
||||
#include <memory>
|
||||
#include <string>
|
||||
|
||||
#include "s3objlist.h"
|
||||
#include "mpu_util.h"
|
||||
|
||||
class S3ObjList;
|
||||
|
||||
typedef std::unique_ptr<xmlChar, decltype(xmlFree)> unique_ptr_xmlChar;
|
||||
typedef std::unique_ptr<xmlXPathObject, decltype(&xmlXPathFreeObject)> unique_ptr_xmlXPathObject;
|
||||
typedef std::unique_ptr<xmlXPathContext, decltype(&xmlXPathFreeContext)> unique_ptr_xmlXPathContext;
|
||||
typedef std::unique_ptr<xmlDoc, decltype(&xmlFreeDoc)> unique_ptr_xmlDoc;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
bool is_truncated(xmlDocPtr doc);
|
||||
int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head);
|
||||
int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head, bool prefix);
|
||||
int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head);
|
||||
xmlChar* get_next_continuation_token(xmlDocPtr doc);
|
||||
xmlChar* get_next_marker(xmlDocPtr doc);
|
||||
unique_ptr_xmlChar get_next_continuation_token(xmlDocPtr doc);
|
||||
unique_ptr_xmlChar get_next_marker(xmlDocPtr doc);
|
||||
bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list);
|
||||
|
||||
bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value);
|
||||
|
||||
@ -19,10 +19,8 @@
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3objlist.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -86,7 +84,7 @@ bool S3ObjList::insert(const char* name, const char* etag, bool is_dir)
|
||||
(*iter).second.orgname = orgname;
|
||||
(*iter).second.is_dir = is_dir;
|
||||
if(etag){
|
||||
(*iter).second.etag = std::string(etag); // over write
|
||||
(*iter).second.etag = etag; // over write
|
||||
}
|
||||
}else{
|
||||
// add new object
|
||||
@ -134,10 +132,10 @@ const s3obj_entry* S3ObjList::GetS3Obj(const char* name) const
|
||||
s3obj_t::const_iterator iter;
|
||||
|
||||
if(!name || '\0' == name[0]){
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
if(objects.end() == (iter = objects.find(name))){
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
return &((*iter).second);
|
||||
}
|
||||
@ -147,10 +145,10 @@ std::string S3ObjList::GetOrgName(const char* name) const
|
||||
const s3obj_entry* ps3obj;
|
||||
|
||||
if(!name || '\0' == name[0]){
|
||||
return std::string("");
|
||||
return "";
|
||||
}
|
||||
if(NULL == (ps3obj = GetS3Obj(name))){
|
||||
return std::string("");
|
||||
if(nullptr == (ps3obj = GetS3Obj(name))){
|
||||
return "";
|
||||
}
|
||||
return ps3obj->orgname;
|
||||
}
|
||||
@ -160,13 +158,13 @@ std::string S3ObjList::GetNormalizedName(const char* name) const
|
||||
const s3obj_entry* ps3obj;
|
||||
|
||||
if(!name || '\0' == name[0]){
|
||||
return std::string("");
|
||||
return "";
|
||||
}
|
||||
if(NULL == (ps3obj = GetS3Obj(name))){
|
||||
return std::string("");
|
||||
if(nullptr == (ps3obj = GetS3Obj(name))){
|
||||
return "";
|
||||
}
|
||||
if(ps3obj->normalname.empty()){
|
||||
return std::string(name);
|
||||
return name;
|
||||
}
|
||||
return ps3obj->normalname;
|
||||
}
|
||||
@ -176,10 +174,10 @@ std::string S3ObjList::GetETag(const char* name) const
|
||||
const s3obj_entry* ps3obj;
|
||||
|
||||
if(!name || '\0' == name[0]){
|
||||
return std::string("");
|
||||
return "";
|
||||
}
|
||||
if(NULL == (ps3obj = GetS3Obj(name))){
|
||||
return std::string("");
|
||||
if(nullptr == (ps3obj = GetS3Obj(name))){
|
||||
return "";
|
||||
}
|
||||
return ps3obj->etag;
|
||||
}
|
||||
@ -188,7 +186,7 @@ bool S3ObjList::IsDir(const char* name) const
|
||||
{
|
||||
const s3obj_entry* ps3obj;
|
||||
|
||||
if(NULL == (ps3obj = GetS3Obj(name))){
|
||||
if(nullptr == (ps3obj = GetS3Obj(name))){
|
||||
return false;
|
||||
}
|
||||
return ps3obj->is_dir;
|
||||
@ -200,12 +198,12 @@ bool S3ObjList::GetLastName(std::string& lastname) const
|
||||
lastname = "";
|
||||
for(s3obj_t::const_iterator iter = objects.begin(); iter != objects.end(); ++iter){
|
||||
if((*iter).second.orgname.length()){
|
||||
if(0 > strcmp(lastname.c_str(), (*iter).second.orgname.c_str())){
|
||||
if(lastname.compare(iter->second.orgname) < 0){
|
||||
lastname = (*iter).second.orgname;
|
||||
result = true;
|
||||
}
|
||||
}else{
|
||||
if(0 > strcmp(lastname.c_str(), (*iter).second.normalname.c_str())){
|
||||
if(lastname.compare(iter->second.normalname) < 0){
|
||||
lastname = (*iter).second.normalname;
|
||||
result = true;
|
||||
}
|
||||
|
||||
@ -21,6 +21,10 @@
|
||||
#ifndef S3FS_S3OBJLIST_H_
|
||||
#define S3FS_S3OBJLIST_H_
|
||||
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Structure / Typedef
|
||||
//-------------------------------------------------------------------
|
||||
@ -34,7 +38,7 @@ struct s3obj_entry{
|
||||
};
|
||||
|
||||
typedef std::map<std::string, struct s3obj_entry> s3obj_t;
|
||||
typedef std::list<std::string> s3obj_list_t;
|
||||
typedef std::vector<std::string> s3obj_list_t;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class S3ObjList
|
||||
@ -43,6 +47,8 @@ class S3ObjList
|
||||
{
|
||||
private:
|
||||
s3obj_t objects;
|
||||
public:
|
||||
std::vector<std::string> common_prefixes;
|
||||
|
||||
private:
|
||||
bool insert_normalized(const char* name, const char* normalized, bool is_dir);
|
||||
@ -56,7 +62,7 @@ class S3ObjList
|
||||
~S3ObjList() {}
|
||||
|
||||
bool IsEmpty() const { return objects.empty(); }
|
||||
bool insert(const char* name, const char* etag = NULL, bool is_dir = false);
|
||||
bool insert(const char* name, const char* etag = nullptr, bool is_dir = false);
|
||||
std::string GetOrgName(const char* name) const;
|
||||
std::string GetNormalizedName(const char* name) const;
|
||||
std::string GetETag(const char* name) const;
|
||||
|
||||
@ -19,19 +19,17 @@
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <csignal>
|
||||
#include <pthread.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_logger.h"
|
||||
#include "sighandlers.h"
|
||||
#include "fdcache.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class S3fsSignals
|
||||
//-------------------------------------------------------------------
|
||||
S3fsSignals* S3fsSignals::pSingleton = NULL;
|
||||
std::unique_ptr<S3fsSignals> S3fsSignals::pSingleton;
|
||||
bool S3fsSignals::enableUsr1 = false;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -40,15 +38,14 @@ bool S3fsSignals::enableUsr1 = false;
|
||||
bool S3fsSignals::Initialize()
|
||||
{
|
||||
if(!S3fsSignals::pSingleton){
|
||||
S3fsSignals::pSingleton = new S3fsSignals;
|
||||
S3fsSignals::pSingleton.reset(new S3fsSignals);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool S3fsSignals::Destroy()
|
||||
{
|
||||
delete S3fsSignals::pSingleton;
|
||||
S3fsSignals::pSingleton = NULL;
|
||||
S3fsSignals::pSingleton.reset();
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -93,16 +90,19 @@ void* S3fsSignals::CheckCacheWorker(void* arg)
|
||||
{
|
||||
Semaphore* pSem = static_cast<Semaphore*>(arg);
|
||||
if(!pSem){
|
||||
pthread_exit(NULL);
|
||||
pthread_exit(nullptr);
|
||||
}
|
||||
if(!S3fsSignals::enableUsr1){
|
||||
pthread_exit(NULL);
|
||||
pthread_exit(nullptr);
|
||||
}
|
||||
|
||||
// wait and loop
|
||||
while(S3fsSignals::enableUsr1){
|
||||
// wait
|
||||
pSem->wait();
|
||||
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(!S3fsSignals::enableUsr1){
|
||||
break; // asap
|
||||
}
|
||||
@ -117,7 +117,7 @@ void* S3fsSignals::CheckCacheWorker(void* arg)
|
||||
pSem->wait();
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
void S3fsSignals::HandlerUSR2(int sig)
|
||||
@ -136,7 +136,7 @@ bool S3fsSignals::InitUsr2Handler()
|
||||
memset(&sa, 0, sizeof(struct sigaction));
|
||||
sa.sa_handler = S3fsSignals::HandlerUSR2;
|
||||
sa.sa_flags = SA_RESTART;
|
||||
if(0 != sigaction(SIGUSR2, &sa, NULL)){
|
||||
if(0 != sigaction(SIGUSR2, &sa, nullptr)){
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
@ -158,7 +158,7 @@ bool S3fsSignals::InitHupHandler()
|
||||
memset(&sa, 0, sizeof(struct sigaction));
|
||||
sa.sa_handler = S3fsSignals::HandlerHUP;
|
||||
sa.sa_flags = SA_RESTART;
|
||||
if(0 != sigaction(SIGHUP, &sa, NULL)){
|
||||
if(0 != sigaction(SIGHUP, &sa, nullptr)){
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
@ -167,7 +167,7 @@ bool S3fsSignals::InitHupHandler()
|
||||
//-------------------------------------------------------------------
|
||||
// Methods
|
||||
//-------------------------------------------------------------------
|
||||
S3fsSignals::S3fsSignals() : pThreadUsr1(NULL), pSemUsr1(NULL)
|
||||
S3fsSignals::S3fsSignals()
|
||||
{
|
||||
if(S3fsSignals::enableUsr1){
|
||||
if(!InitUsr1Handler()){
|
||||
@ -200,23 +200,21 @@ bool S3fsSignals::InitUsr1Handler()
|
||||
|
||||
// create thread
|
||||
int result;
|
||||
pSemUsr1 = new Semaphore(0);
|
||||
pThreadUsr1 = new pthread_t;
|
||||
if(0 != (result = pthread_create(pThreadUsr1, NULL, S3fsSignals::CheckCacheWorker, static_cast<void*>(pSemUsr1)))){
|
||||
std::unique_ptr<Semaphore> pSemUsr1_tmp(new Semaphore(0));
|
||||
std::unique_ptr<pthread_t> pThreadUsr1_tmp(new pthread_t);
|
||||
if(0 != (result = pthread_create(pThreadUsr1.get(), nullptr, S3fsSignals::CheckCacheWorker, static_cast<void*>(pSemUsr1_tmp.get())))){
|
||||
S3FS_PRN_ERR("Could not create thread for SIGUSR1 by %d", result);
|
||||
delete pSemUsr1;
|
||||
delete pThreadUsr1;
|
||||
pSemUsr1 = NULL;
|
||||
pThreadUsr1 = NULL;
|
||||
return false;
|
||||
}
|
||||
pSemUsr1 = std::move(pSemUsr1_tmp);
|
||||
pThreadUsr1 = std::move(pThreadUsr1_tmp);
|
||||
|
||||
// set handler
|
||||
struct sigaction sa;
|
||||
memset(&sa, 0, sizeof(struct sigaction));
|
||||
sa.sa_handler = S3fsSignals::HandlerUSR1;
|
||||
sa.sa_flags = SA_RESTART;
|
||||
if(0 != sigaction(SIGUSR1, &sa, NULL)){
|
||||
if(0 != sigaction(SIGUSR1, &sa, nullptr)){
|
||||
S3FS_PRN_ERR("Could not set signal handler for SIGUSR1");
|
||||
DestroyUsr1Handler();
|
||||
return false;
|
||||
@ -237,16 +235,14 @@ bool S3fsSignals::DestroyUsr1Handler()
|
||||
pSemUsr1->post();
|
||||
|
||||
// wait for thread exiting
|
||||
void* retval = NULL;
|
||||
void* retval = nullptr;
|
||||
int result;
|
||||
if(0 != (result = pthread_join(*pThreadUsr1, &retval))){
|
||||
S3FS_PRN_ERR("Could not stop thread for SIGUSR1 by %d", result);
|
||||
return false;
|
||||
}
|
||||
delete pSemUsr1;
|
||||
delete pThreadUsr1;
|
||||
pSemUsr1 = NULL;
|
||||
pThreadUsr1 = NULL;
|
||||
pSemUsr1.reset();
|
||||
pThreadUsr1.reset();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -21,7 +21,9 @@
|
||||
#ifndef S3FS_SIGHANDLERS_H_
|
||||
#define S3FS_SIGHANDLERS_H_
|
||||
|
||||
#include "psemaphore.h"
|
||||
#include <memory>
|
||||
|
||||
class Semaphore;
|
||||
|
||||
//----------------------------------------------
|
||||
// class S3fsSignals
|
||||
@ -29,14 +31,14 @@
|
||||
class S3fsSignals
|
||||
{
|
||||
private:
|
||||
static S3fsSignals* pSingleton;
|
||||
static std::unique_ptr<S3fsSignals> pSingleton;
|
||||
static bool enableUsr1;
|
||||
|
||||
pthread_t* pThreadUsr1;
|
||||
Semaphore* pSemUsr1;
|
||||
std::unique_ptr<pthread_t> pThreadUsr1;
|
||||
std::unique_ptr<Semaphore> pSemUsr1;
|
||||
|
||||
protected:
|
||||
static S3fsSignals* get() { return pSingleton; }
|
||||
static S3fsSignals* get() { return pSingleton.get(); }
|
||||
|
||||
static void HandlerUSR1(int sig);
|
||||
static void* CheckCacheWorker(void* arg);
|
||||
@ -48,13 +50,17 @@ class S3fsSignals
|
||||
static bool InitHupHandler();
|
||||
|
||||
S3fsSignals();
|
||||
~S3fsSignals();
|
||||
S3fsSignals(const S3fsSignals&) = delete;
|
||||
S3fsSignals(S3fsSignals&&) = delete;
|
||||
S3fsSignals& operator=(const S3fsSignals&) = delete;
|
||||
S3fsSignals& operator=(S3fsSignals&&) = delete;
|
||||
|
||||
bool InitUsr1Handler();
|
||||
bool DestroyUsr1Handler();
|
||||
bool WakeupUsr1Thread();
|
||||
|
||||
public:
|
||||
~S3fsSignals();
|
||||
static bool Initialize();
|
||||
static bool Destroy();
|
||||
|
||||
|
||||
@ -18,45 +18,27 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <cerrno>
|
||||
#include <climits>
|
||||
#include <iomanip>
|
||||
|
||||
#include <stdexcept>
|
||||
#include <sstream>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_logger.h"
|
||||
#include "string_util.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
//-------------------------------------------------------------------
|
||||
const char SPACES[] = " \t\r\n";
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Templates
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
template <class T> std::string str(T value)
|
||||
|
||||
std::string str(const struct timespec value)
|
||||
{
|
||||
std::ostringstream s;
|
||||
s << value;
|
||||
return s.str();
|
||||
}
|
||||
|
||||
template std::string str(short value);
|
||||
template std::string str(unsigned short value);
|
||||
template std::string str(int value);
|
||||
template std::string str(unsigned int value);
|
||||
template std::string str(long value);
|
||||
template std::string str(unsigned long value);
|
||||
template std::string str(long long value);
|
||||
template std::string str(unsigned long long value);
|
||||
|
||||
template<> std::string str(struct timespec value) {
|
||||
std::ostringstream s;
|
||||
s << value.tv_sec;
|
||||
if(value.tv_nsec != 0){
|
||||
@ -65,10 +47,6 @@ template<> std::string str(struct timespec value) {
|
||||
return s.str();
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
|
||||
#ifdef __MSYS__
|
||||
/*
|
||||
* Polyfill for strptime function
|
||||
@ -89,7 +67,7 @@ char* strptime(const char* s, const char* f, struct tm* tm)
|
||||
|
||||
bool s3fs_strtoofft(off_t* value, const char* str, int base)
|
||||
{
|
||||
if(value == NULL || str == NULL){
|
||||
if(value == nullptr || str == nullptr){
|
||||
return false;
|
||||
}
|
||||
errno = 0;
|
||||
@ -126,15 +104,13 @@ std::string lower(std::string s)
|
||||
return s;
|
||||
}
|
||||
|
||||
std::string trim_left(const std::string &s, const char *t /* = SPACES */)
|
||||
std::string trim_left(std::string d, const char *t /* = SPACES */)
|
||||
{
|
||||
std::string d(s);
|
||||
return d.erase(0, s.find_first_not_of(t));
|
||||
return d.erase(0, d.find_first_not_of(t));
|
||||
}
|
||||
|
||||
std::string trim_right(const std::string &s, const char *t /* = SPACES */)
|
||||
std::string trim_right(std::string d, const char *t /* = SPACES */)
|
||||
{
|
||||
std::string d(s);
|
||||
std::string::size_type i(d.find_last_not_of(t));
|
||||
if(i == std::string::npos){
|
||||
return "";
|
||||
@ -143,29 +119,46 @@ std::string trim_right(const std::string &s, const char *t /* = SPACES */)
|
||||
}
|
||||
}
|
||||
|
||||
std::string trim(const std::string &s, const char *t /* = SPACES */)
|
||||
std::string trim(std::string s, const char *t /* = SPACES */)
|
||||
{
|
||||
return trim_left(trim_right(s, t), t);
|
||||
return trim_left(trim_right(std::move(s), t), t);
|
||||
}
|
||||
|
||||
std::string peeloff(const std::string& s)
|
||||
{
|
||||
if(s.size() < 2 || *s.begin() != '"' || *s.rbegin() != '"'){
|
||||
return s;
|
||||
}
|
||||
return s.substr(1, s.size() - 2);
|
||||
}
|
||||
|
||||
//
|
||||
// urlEncode a fuse path,
|
||||
// taking into special consideration "/",
|
||||
// otherwise regular urlEncode.
|
||||
// Three url encode functions
|
||||
//
|
||||
std::string urlEncode(const std::string &s)
|
||||
// urlEncodeGeneral: A general URL encoding function.
|
||||
// urlEncodePath : A function that URL encodes by excluding the path
|
||||
// separator('/').
|
||||
// urlEncodeQuery : A function that does URL encoding by excluding
|
||||
// some characters('=', '&' and '%').
|
||||
// This function can be used when the target string
|
||||
// contains already URL encoded strings. It also
|
||||
// excludes the characters('=' and '&') used in query strings.
|
||||
// Therefore, it is the function to use for URL encoding
|
||||
// values that appear in query strings.
|
||||
//
|
||||
static constexpr char encode_general_except_chars[] = ".-_~"; // For general URL encode
|
||||
static constexpr char encode_path_except_chars[] = ".-_~/"; // For fuse(included path) URL encode
|
||||
static constexpr char encode_query_except_chars[] = ".-_~=&%"; // For query params(and encoded string)
|
||||
|
||||
static std::string rawUrlEncode(const std::string &s, const char* except_chars)
|
||||
{
|
||||
std::string result;
|
||||
for (size_t i = 0; i < s.length(); ++i) {
|
||||
unsigned char c = s[i];
|
||||
if (c == '/' // Note- special case for fuse paths...
|
||||
|| c == '.'
|
||||
|| c == '-'
|
||||
|| c == '_'
|
||||
|| c == '~'
|
||||
|| (c >= 'a' && c <= 'z')
|
||||
|| (c >= 'A' && c <= 'Z')
|
||||
|| (c >= '0' && c <= '9'))
|
||||
if((except_chars && nullptr != strchr(except_chars, c)) ||
|
||||
(c >= 'a' && c <= 'z') ||
|
||||
(c >= 'A' && c <= 'Z') ||
|
||||
(c >= '0' && c <= '9') )
|
||||
{
|
||||
result += c;
|
||||
}else{
|
||||
@ -176,34 +169,19 @@ std::string urlEncode(const std::string &s)
|
||||
return result;
|
||||
}
|
||||
|
||||
//
|
||||
// urlEncode a fuse path,
|
||||
// taking into special consideration "/",
|
||||
// otherwise regular urlEncode.
|
||||
//
|
||||
std::string urlEncode2(const std::string &s)
|
||||
std::string urlEncodeGeneral(const std::string &s)
|
||||
{
|
||||
std::string result;
|
||||
for (size_t i = 0; i < s.length(); ++i) {
|
||||
unsigned char c = s[i];
|
||||
if (c == '=' // Note- special case for fuse paths...
|
||||
|| c == '&' // Note- special case for s3...
|
||||
|| c == '%'
|
||||
|| c == '.'
|
||||
|| c == '-'
|
||||
|| c == '_'
|
||||
|| c == '~'
|
||||
|| (c >= 'a' && c <= 'z')
|
||||
|| (c >= 'A' && c <= 'Z')
|
||||
|| (c >= '0' && c <= '9'))
|
||||
{
|
||||
result += c;
|
||||
}else{
|
||||
result += "%";
|
||||
result += s3fs_hex_upper(&c, 1);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
return rawUrlEncode(s, encode_general_except_chars);
|
||||
}
|
||||
|
||||
std::string urlEncodePath(const std::string &s)
|
||||
{
|
||||
return rawUrlEncode(s, encode_path_except_chars);
|
||||
}
|
||||
|
||||
std::string urlEncodeQuery(const std::string &s)
|
||||
{
|
||||
return rawUrlEncode(s, encode_query_except_chars);
|
||||
}
|
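As a quick illustration of the three wrappers above, here is a minimal sketch; the expected outputs follow from the except-character tables in this hunk, and `show_url_encodes` is an illustrative name, not part of the diff:

```cpp
#include <string>
#include "string_util.h"   // urlEncodeGeneral, urlEncodePath, urlEncodeQuery (as declared in this diff)

void show_url_encodes()
{
    // '/' survives only the path variant; '=', '&' and '%' survive only the query variant.
    std::string path  = urlEncodePath("dir/a b");          // expected: "dir/a%20b"
    std::string query = urlEncodeQuery("prefix=dir/a b");  // expected: "prefix=dir%2Fa%20b"
    std::string gen   = urlEncodeGeneral("dir/a b");       // expected: "dir%2Fa%20b"
}
```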
||||
|
||||
std::string urlDecode(const std::string& s)
|
||||
@ -282,7 +260,7 @@ bool get_keyword_value(const std::string& target, const char* keyword, std::stri
|
||||
std::string get_date_rfc850()
|
||||
{
|
||||
char buf[100];
|
||||
time_t t = time(NULL);
|
||||
time_t t = time(nullptr);
|
||||
struct tm res;
|
||||
strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S GMT", gmtime_r(&t, &res));
|
||||
return buf;
|
||||
@ -290,7 +268,7 @@ std::string get_date_rfc850()
|
||||
|
||||
void get_date_sigv3(std::string& date, std::string& date8601)
|
||||
{
|
||||
time_t tm = time(NULL);
|
||||
time_t tm = time(nullptr);
|
||||
date = get_date_string(tm);
|
||||
date8601 = get_date_iso8601(tm);
|
||||
}
|
||||
@ -318,7 +296,7 @@ bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime)
|
||||
}
|
||||
|
||||
struct tm tm;
|
||||
char* prest = strptime(pdate, "%Y-%m-%dT%T", &tm);
|
||||
const char* prest = strptime(pdate, "%Y-%m-%dT%T", &tm);
|
||||
if(prest == pdate){
|
||||
// wrong format
|
||||
return false;
|
||||
@ -400,31 +378,26 @@ std::string s3fs_hex_upper(const unsigned char* input, size_t length)
|
||||
return s3fs_hex(input, length, "0123456789ABCDEF");
|
||||
}
|
||||
|
||||
char* s3fs_base64(const unsigned char* input, size_t length)
|
||||
std::string s3fs_base64(const unsigned char* input, size_t length)
|
||||
{
|
||||
static const char base[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
|
||||
char* result;
|
||||
static constexpr char base[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
|
||||
|
||||
if(!input || 0 == length){
|
||||
return NULL;
|
||||
}
|
||||
result = new char[((length + 3 - 1) / 3) * 4 + 1];
|
||||
std::string result;
|
||||
result.reserve(((length + 3 - 1) / 3) * 4 + 1);
|
||||
|
||||
unsigned char parts[4];
|
||||
size_t rpos;
|
||||
size_t wpos;
|
||||
for(rpos = 0, wpos = 0; rpos < length; rpos += 3){
|
||||
for(rpos = 0; rpos < length; rpos += 3){
|
||||
parts[0] = (input[rpos] & 0xfc) >> 2;
|
||||
parts[1] = ((input[rpos] & 0x03) << 4) | ((((rpos + 1) < length ? input[rpos + 1] : 0x00) & 0xf0) >> 4);
|
||||
parts[2] = (rpos + 1) < length ? (((input[rpos + 1] & 0x0f) << 2) | ((((rpos + 2) < length ? input[rpos + 2] : 0x00) & 0xc0) >> 6)) : 0x40;
|
||||
parts[3] = (rpos + 2) < length ? (input[rpos + 2] & 0x3f) : 0x40;
|
||||
|
||||
result[wpos++] = base[parts[0]];
|
||||
result[wpos++] = base[parts[1]];
|
||||
result[wpos++] = base[parts[2]];
|
||||
result[wpos++] = base[parts[3]];
|
||||
result += base[parts[0]];
|
||||
result += base[parts[1]];
|
||||
result += base[parts[2]];
|
||||
result += base[parts[3]];
|
||||
}
|
||||
result[wpos] = '\0';
|
||||
|
||||
return result;
|
||||
}
|
||||
@ -450,34 +423,28 @@ inline unsigned char char_decode64(const char ch)
|
||||
return by;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_decode64(const char* input, size_t input_len, size_t* plength)
|
||||
std::string s3fs_decode64(const char* input, size_t input_len)
|
||||
{
|
||||
unsigned char* result;
|
||||
if(!input || 0 == input_len || !plength){
|
||||
return NULL;
|
||||
}
|
||||
result = new unsigned char[input_len / 4 * 3];
|
||||
|
||||
std::string result;
|
||||
result.reserve(input_len / 4 * 3);
|
||||
unsigned char parts[4];
|
||||
size_t rpos;
|
||||
size_t wpos;
|
||||
for(rpos = 0, wpos = 0; rpos < input_len; rpos += 4){
|
||||
for(rpos = 0; rpos < input_len; rpos += 4){
|
||||
parts[0] = char_decode64(input[rpos]);
|
||||
parts[1] = (rpos + 1) < input_len ? char_decode64(input[rpos + 1]) : 64;
|
||||
parts[2] = (rpos + 2) < input_len ? char_decode64(input[rpos + 2]) : 64;
|
||||
parts[3] = (rpos + 3) < input_len ? char_decode64(input[rpos + 3]) : 64;
|
||||
|
||||
result[wpos++] = ((parts[0] << 2) & 0xfc) | ((parts[1] >> 4) & 0x03);
|
||||
result += static_cast<char>(((parts[0] << 2) & 0xfc) | ((parts[1] >> 4) & 0x03));
|
||||
if(64 == parts[2]){
|
||||
break;
|
||||
}
|
||||
result[wpos++] = ((parts[1] << 4) & 0xf0) | ((parts[2] >> 2) & 0x0f);
|
||||
result += static_cast<char>(((parts[1] << 4) & 0xf0) | ((parts[2] >> 2) & 0x0f));
|
||||
if(64 == parts[3]){
|
||||
break;
|
||||
}
|
||||
result[wpos++] = ((parts[2] << 6) & 0xc0) | (parts[3] & 0x3f);
|
||||
result += static_cast<char>(((parts[2] << 6) & 0xc0) | (parts[3] & 0x3f));
|
||||
}
|
||||
*plength = wpos;
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -492,7 +459,7 @@ unsigned char* s3fs_decode64(const char* input, size_t input_len, size_t* plengt
|
||||
|
||||
// Base location for transform. The range 0xE000 - 0xF8ff
|
||||
// is a private range, so we use the start of this range.
|
||||
static const unsigned int escape_base = 0xe000;
|
||||
static constexpr unsigned int escape_base = 0xe000;
|
||||
|
||||
// encode bytes into wobbly utf8.
|
||||
// 'result' can be null. returns true if transform was needed.
|
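The encode/decode bodies are not shown in this hunk, so the snippet below only sketches the round-trip property that the unit test later in this compare (test_wtf8_encoding) relies on; `wtf8_roundtrip_example` is an illustrative name:

```cpp
#include <string>
#include "string_util.h"   // s3fs_wtf8_encode / s3fs_wtf8_decode (std::string overloads)

void wtf8_roundtrip_example()
{
    // 0xFF is not valid UTF-8; the exact escaped bytes are an internal detail
    // (mapped into the private range starting at escape_base), but decoding the
    // encoded string is expected to reproduce the original input.
    std::string mixed = "abc";
    mixed += static_cast<char>(0xFF);
    mixed += "xyz";

    std::string escaped = s3fs_wtf8_encode(mixed);
    std::string back    = s3fs_wtf8_decode(escaped);
    // back == mixed
}
```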
||||
@ -609,6 +576,89 @@ std::string s3fs_wtf8_decode(const std::string &s)
|
||||
return result;
|
||||
}
|
||||
|
||||
//
|
||||
// Encode only CR('\r'=0x0D) and it also encodes the '%' character accordingly.
|
||||
//
|
||||
// The xmlReadMemory() function in libxml2 replaces CR code with LF code('\n'=0x0A)
|
||||
// due to the XML specification.
|
||||
// s3fs uses libxml2 to parse the S3 response, and this automatic substitution
|
||||
// of libxml2 may change the object name(file/dir name). Therefore, before passing
|
||||
// the response to the xmlReadMemory() function, we need the string encoded by
|
||||
// this function.
|
||||
//
|
||||
// [NOTE]
|
||||
// Normally the quotes included in the XML content data are HTML encoded("&quot;").
|
||||
// Encoding for CR can also be HTML encoded as a character reference (ex. "&#13;"), but
|
||||
// if a file name with the same content as this encoded string exists, the
|
||||
// original string cannot be distinguished from the encoded one.
|
||||
// Therefore, CR is encoded in the same manner as URL encoding("%0D").
|
||||
// It is also assumed that there is no CR code in the S3 response tags etc. (in practice,
|
||||
// it should not appear there).
|
||||
//
|
||||
std::string get_encoded_cr_code(const char* pbase)
|
||||
{
|
||||
std::string result;
|
||||
if(!pbase){
|
||||
return result;
|
||||
}
|
||||
std::string strbase(pbase);
|
||||
size_t baselength = strbase.length();
|
||||
size_t startpos = 0;
|
||||
size_t foundpos;
|
||||
while(startpos < baselength && std::string::npos != (foundpos = strbase.find_first_of("%\r", startpos))){
|
||||
if(0 < (foundpos - startpos)){
|
||||
result += strbase.substr(startpos, foundpos - startpos);
|
||||
}
|
||||
if('%' == strbase[foundpos]){
|
||||
result += "%45";
|
||||
}else if('\r' == strbase[foundpos]){
|
||||
result += "%0D";
|
||||
}
|
||||
startpos = foundpos + 1;
|
||||
}
|
||||
if(startpos < baselength){
|
||||
result += strbase.substr(startpos);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
//
|
||||
// Decode a string encoded with get_encoded_cr_code().
|
||||
//
|
||||
std::string get_decoded_cr_code(const char* pencode)
|
||||
{
|
||||
std::string result;
|
||||
if(!pencode){
|
||||
return result;
|
||||
}
|
||||
std::string strencode(pencode);
|
||||
size_t encodelength = strencode.length();
|
||||
size_t startpos = 0;
|
||||
size_t foundpos;
|
||||
while(startpos < encodelength && std::string::npos != (foundpos = strencode.find('%', startpos))){
|
||||
if(0 < (foundpos - startpos)){
|
||||
result += strencode.substr(startpos, foundpos - startpos);
|
||||
}
|
||||
if((foundpos + 2) < encodelength && 0 == strencode.compare(foundpos, 3, "%45")){
|
||||
result += '%';
|
||||
startpos = foundpos + 3;
|
||||
}else if((foundpos + 2) < encodelength && 0 == strencode.compare(foundpos, 3, "%0D")){
|
||||
result += '\r';
|
||||
startpos = foundpos + 3;
|
||||
}else if((foundpos + 1) < encodelength && 0 == strencode.compare(foundpos, 2, "%%")){
|
||||
result += '%';
|
||||
startpos = foundpos + 2;
|
||||
}else{
|
||||
result += '%';
|
||||
startpos = foundpos + 1;
|
||||
}
|
||||
}
|
||||
if(startpos < encodelength){
|
||||
result += strencode.substr(startpos);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
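A small usage sketch of the pair above, assuming the replacement rules shown in the code ('%' becomes "%45", '\r' becomes "%0D"); `cr_code_example` is an illustrative name:

```cpp
#include <string>
#include "string_util.h"   // get_encoded_cr_code / get_decoded_cr_code (as added in this diff)

void cr_code_example()
{
    // Object name that contains both a CR and a literal '%'.
    std::string name("a\rb%");

    // Encode before handing the XML body to xmlReadMemory():
    //   '\r' becomes "%0D" and '%' becomes "%45".
    std::string encoded = get_encoded_cr_code(name.c_str());    // expected: "a%0Db%45"

    // Decoding restores the original object name.
    std::string decoded = get_decoded_cr_code(encoded.c_str()); // expected: "a\rb%"
}
```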
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
|
||||
@ -21,13 +21,16 @@
|
||||
#ifndef S3FS_STRING_UTIL_H_
|
||||
#define S3FS_STRING_UTIL_H_
|
||||
|
||||
#include <cstring>
|
||||
#include <string>
|
||||
|
||||
//
|
||||
// A collection of string utilities for manipulating URLs and HTTP responses.
|
||||
//
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
//-------------------------------------------------------------------
|
||||
extern const char SPACES[];
|
||||
static constexpr char SPACES[] = " \t\r\n";
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Inline functions
|
||||
@ -35,11 +38,6 @@ extern const char SPACES[];
|
||||
static inline int is_prefix(const char *str, const char *prefix) { return strncmp(str, prefix, strlen(prefix)) == 0; }
|
||||
static inline const char* SAFESTRPTR(const char *strptr) { return strptr ? strptr : ""; }
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Templates
|
||||
//-------------------------------------------------------------------
|
||||
template <class T> std::string str(T value);
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Macros(WTF8)
|
||||
//-------------------------------------------------------------------
|
||||
@ -54,6 +52,9 @@ template <class T> std::string str(T value);
|
||||
//-------------------------------------------------------------------
|
||||
// Utilities
|
||||
//-------------------------------------------------------------------
|
||||
// TODO: rename to to_string?
|
||||
std::string str(const struct timespec value);
|
||||
|
||||
#ifdef __MSYS__
|
||||
//
|
||||
// Polyfill for strptime function.
|
||||
@ -74,10 +75,11 @@ off_t cvt_strtoofft(const char* str, int base);
|
||||
//
|
||||
// String Manipulation
|
||||
//
|
||||
std::string trim_left(const std::string &s, const char *t = SPACES);
|
||||
std::string trim_right(const std::string &s, const char *t = SPACES);
|
||||
std::string trim(const std::string &s, const char *t = SPACES);
|
||||
std::string trim_left(std::string s, const char *t = SPACES);
|
||||
std::string trim_right(std::string s, const char *t = SPACES);
|
||||
std::string trim(std::string s, const char *t = SPACES);
|
||||
std::string lower(std::string s);
|
||||
std::string peeloff(const std::string& s);
|
||||
|
||||
//
|
||||
// Date string
|
||||
@ -92,8 +94,9 @@ bool convert_unixtime_from_option_arg(const char* argv, time_t& unixtime);
|
||||
//
|
||||
// For encoding
|
||||
//
|
||||
std::string urlEncode(const std::string &s);
|
||||
std::string urlEncode2(const std::string &s);
|
||||
std::string urlEncodeGeneral(const std::string &s);
|
||||
std::string urlEncodePath(const std::string &s);
|
||||
std::string urlEncodeQuery(const std::string &s);
|
||||
std::string urlDecode(const std::string& s);
|
||||
|
||||
bool takeout_str_dquart(std::string& str);
|
||||
@ -104,8 +107,8 @@ bool get_keyword_value(const std::string& target, const char* keyword, std::stri
|
||||
//
|
||||
std::string s3fs_hex_lower(const unsigned char* input, size_t length);
|
||||
std::string s3fs_hex_upper(const unsigned char* input, size_t length);
|
||||
char* s3fs_base64(const unsigned char* input, size_t length);
|
||||
unsigned char* s3fs_decode64(const char* input, size_t input_len, size_t* plength);
|
||||
std::string s3fs_base64(const unsigned char* input, size_t length);
|
||||
std::string s3fs_decode64(const char* input, size_t input_len);
|
||||
|
||||
//
|
||||
// WTF8
|
||||
@ -115,6 +118,12 @@ std::string s3fs_wtf8_encode(const std::string &s);
|
||||
bool s3fs_wtf8_decode(const char *s, std::string *result);
|
||||
std::string s3fs_wtf8_decode(const std::string &s);
|
||||
|
||||
//
|
||||
// For CR in XML
|
||||
//
|
||||
std::string get_encoded_cr_code(const char* pbase);
|
||||
std::string get_decoded_cr_code(const char* pencode);
|
||||
|
||||
#endif // S3FS_STRING_UTIL_H_
|
||||
|
||||
/*
|
||||
|
||||
@ -54,9 +54,9 @@ const std::string& S3fsCred::GetBucket()
|
||||
|
||||
#define ASSERT_IS_SORTED(x) assert_is_sorted((x), __FILE__, __LINE__)
|
||||
|
||||
void assert_is_sorted(struct curl_slist* list, const char *file, int line)
|
||||
void assert_is_sorted(const struct curl_slist* list, const char *file, int line)
|
||||
{
|
||||
for(; list != NULL; list = list->next){
|
||||
for(; list != nullptr; list = list->next){
|
||||
std::string key1 = list->data;
|
||||
key1.erase(key1.find(':'));
|
||||
std::string key2 = list->data;
|
||||
@ -74,7 +74,7 @@ void assert_is_sorted(struct curl_slist* list, const char *file, int line)
|
||||
size_t curl_slist_length(const struct curl_slist* list)
|
||||
{
|
||||
size_t len = 0;
|
||||
for(; list != NULL; list = list->next){
|
||||
for(; list != nullptr; list = list->next){
|
||||
++len;
|
||||
}
|
||||
return len;
|
||||
@ -82,7 +82,7 @@ size_t curl_slist_length(const struct curl_slist* list)
|
||||
|
||||
void test_sort_insert()
|
||||
{
|
||||
struct curl_slist* list = NULL;
|
||||
struct curl_slist* list = nullptr;
|
||||
ASSERT_IS_SORTED(list);
|
||||
// add to head
|
||||
list = curl_slist_sort_insert(list, "2", "val");
|
||||
@ -105,9 +105,55 @@ void test_sort_insert()
|
||||
curl_slist_free_all(list);
|
||||
}
|
||||
|
||||
void test_slist_remove()
|
||||
{
|
||||
struct curl_slist* list = nullptr;
|
||||
|
||||
// remove no elements
|
||||
ASSERT_EQUALS(static_cast<size_t>(0), curl_slist_length(list));
|
||||
list = curl_slist_remove(list, "1");
|
||||
ASSERT_EQUALS(static_cast<size_t>(0), curl_slist_length(list));
|
||||
|
||||
// remove only element
|
||||
list = nullptr;
|
||||
list = curl_slist_sort_insert(list, "1", "val");
|
||||
ASSERT_EQUALS(static_cast<size_t>(1), curl_slist_length(list));
|
||||
list = curl_slist_remove(list, "1");
|
||||
ASSERT_EQUALS(static_cast<size_t>(0), curl_slist_length(list));
|
||||
|
||||
// remove head element
|
||||
list = nullptr;
|
||||
list = curl_slist_sort_insert(list, "1", "val");
|
||||
list = curl_slist_sort_insert(list, "2", "val");
|
||||
ASSERT_EQUALS(static_cast<size_t>(2), curl_slist_length(list));
|
||||
list = curl_slist_remove(list, "1");
|
||||
ASSERT_EQUALS(static_cast<size_t>(1), curl_slist_length(list));
|
||||
curl_slist_free_all(list);
|
||||
|
||||
// remove tail element
|
||||
list = nullptr;
|
||||
list = curl_slist_sort_insert(list, "1", "val");
|
||||
list = curl_slist_sort_insert(list, "2", "val");
|
||||
ASSERT_EQUALS(static_cast<size_t>(2), curl_slist_length(list));
|
||||
list = curl_slist_remove(list, "2");
|
||||
ASSERT_EQUALS(static_cast<size_t>(1), curl_slist_length(list));
|
||||
curl_slist_free_all(list);
|
||||
|
||||
// remove middle element
|
||||
list = nullptr;
|
||||
list = curl_slist_sort_insert(list, "1", "val");
|
||||
list = curl_slist_sort_insert(list, "2", "val");
|
||||
list = curl_slist_sort_insert(list, "3", "val");
|
||||
ASSERT_EQUALS(static_cast<size_t>(3), curl_slist_length(list));
|
||||
list = curl_slist_remove(list, "2");
|
||||
ASSERT_EQUALS(static_cast<size_t>(2), curl_slist_length(list));
|
||||
curl_slist_free_all(list);
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
test_sort_insert();
|
||||
test_slist_remove();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@ -18,11 +18,8 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <limits>
|
||||
#include <stdint.h>
|
||||
#include <string>
|
||||
|
||||
#include "fdcache.h"
|
||||
#include "fdcache_page.h"
|
||||
#include "fdcache_stat.h"
|
||||
#include "test_util.h"
|
||||
|
||||
bool CacheFileStat::Open() { return false; }
|
||||
@ -36,7 +33,7 @@ void test_compress()
|
||||
ASSERT_EQUALS(off_t(42), list.Size());
|
||||
ASSERT_FALSE(list.IsPageLoaded(0, 1));
|
||||
|
||||
list.SetPageLoadedStatus(0, 1, /*pstatus=*/ PageList::PAGE_LOADED);
|
||||
list.SetPageLoadedStatus(0, 1, /*pstatus=*/ PageList::page_status::LOADED);
|
||||
ASSERT_TRUE(list.IsPageLoaded(0, 1));
|
||||
ASSERT_FALSE(list.IsPageLoaded(0, 2));
|
||||
|
||||
@ -47,7 +44,7 @@ void test_compress()
|
||||
ASSERT_EQUALS(off_t(41), size);
|
||||
|
||||
// test adding subsequent page then compressing
|
||||
list.SetPageLoadedStatus(1, 3, /*pstatus=*/ PageList::PAGE_LOADED);
|
||||
list.SetPageLoadedStatus(1, 3, /*pstatus=*/ PageList::page_status::LOADED);
|
||||
list.Compress();
|
||||
ASSERT_TRUE(list.IsPageLoaded(0, 3));
|
||||
|
||||
@ -56,7 +53,7 @@ void test_compress()
|
||||
ASSERT_EQUALS(off_t(38), size);
|
||||
|
||||
// test adding non-contiguous page then compressing
|
||||
list.SetPageLoadedStatus(5, 1, /*pstatus=*/ PageList::PAGE_LOADED);
|
||||
list.SetPageLoadedStatus(5, 1, /*pstatus=*/ PageList::page_status::LOADED);
|
||||
list.Compress();
|
||||
|
||||
ASSERT_TRUE(list.FindUnloadedPage(0, start, size));
|
||||
@ -66,7 +63,7 @@ void test_compress()
|
||||
printf("\n");
|
||||
|
||||
// test adding page between two pages then compressing
|
||||
list.SetPageLoadedStatus(4, 1, /*pstatus=*/ PageList::PAGE_LOADED);
|
||||
list.SetPageLoadedStatus(4, 1, /*pstatus=*/ PageList::page_status::LOADED);
|
||||
list.Compress();
|
||||
|
||||
list.Dump();
|
||||
|
||||
@ -18,15 +18,12 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdint>
|
||||
#include <cstdlib>
|
||||
#include <limits>
|
||||
#include <stdint.h>
|
||||
#include <strings.h>
|
||||
#include <string>
|
||||
#include <map>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_logger.h"
|
||||
#include "string_util.h"
|
||||
#include "test_util.h"
|
||||
|
||||
@ -53,47 +50,48 @@ void test_trim()
|
||||
ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234"));
|
||||
ASSERT_EQUALS(std::string("1234"), trim_right("1234"));
|
||||
|
||||
ASSERT_EQUALS(std::string("0"), str(0));
|
||||
ASSERT_EQUALS(std::string("1"), str(1));
|
||||
ASSERT_EQUALS(std::string("-1"), str(-1));
|
||||
ASSERT_EQUALS(std::string("9223372036854775807"), str(std::numeric_limits<int64_t>::max()));
|
||||
ASSERT_EQUALS(std::string("-9223372036854775808"), str(std::numeric_limits<int64_t>::min()));
|
||||
ASSERT_EQUALS(std::string("0"), str(std::numeric_limits<uint64_t>::min()));
|
||||
ASSERT_EQUALS(std::string("18446744073709551615"), str(std::numeric_limits<uint64_t>::max()));
|
||||
ASSERT_EQUALS(std::string("1234"), peeloff("\"1234\"")); // "1234" -> 1234
|
||||
ASSERT_EQUALS(std::string("\"1234\""), peeloff("\"\"1234\"\"")); // ""1234"" -> "1234"
|
||||
ASSERT_EQUALS(std::string("\"1234"), peeloff("\"\"1234\"")); // ""1234" -> "1234
|
||||
ASSERT_EQUALS(std::string("1234\""), peeloff("\"1234\"\"")); // "1234"" -> 1234"
|
||||
ASSERT_EQUALS(std::string("\"1234"), peeloff("\"1234")); // "1234 -> "1234
|
||||
ASSERT_EQUALS(std::string("1234\""), peeloff("1234\"")); // 1234" -> 1234"
|
||||
ASSERT_EQUALS(std::string(" \"1234\""), peeloff(" \"1234\"")); // _"1234" -> _"1234"
|
||||
ASSERT_EQUALS(std::string("\"1234\" "), peeloff("\"1234\" ")); // "1234"_ -> "1234"_
|
||||
}
|
||||
|
||||
void test_base64()
|
||||
{
|
||||
unsigned char *buf;
|
||||
size_t len;
|
||||
std::string buf;
|
||||
char tmpbuf = '\0';
|
||||
|
||||
ASSERT_STREQUALS(s3fs_base64(NULL, 0), NULL);
|
||||
buf = s3fs_decode64(NULL, 0, &len);
|
||||
ASSERT_BUFEQUALS(reinterpret_cast<const char *>(buf), len, NULL, 0);
|
||||
ASSERT_EQUALS(s3fs_base64(nullptr, 0), std::string(""));
|
||||
buf = s3fs_decode64(nullptr, 0);
|
||||
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), &tmpbuf, 0);
|
||||
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>(""), 0), NULL);
|
||||
buf = s3fs_decode64("", 0, &len);
|
||||
ASSERT_BUFEQUALS(reinterpret_cast<const char *>(buf), len, NULL, 0);
|
||||
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>(""), 0), std::string(""));
|
||||
buf = s3fs_decode64("", 0);
|
||||
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), &tmpbuf, 0);
|
||||
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1"), 1), "MQ==");
|
||||
buf = s3fs_decode64("MQ==", 4, &len);
|
||||
ASSERT_BUFEQUALS(reinterpret_cast<const char *>(buf), len, "1", 1);
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(1));
|
||||
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1"), 1), std::string("MQ=="));
|
||||
buf = s3fs_decode64("MQ==", 4);
|
||||
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), "1", 1);
|
||||
ASSERT_EQUALS(buf.length(), static_cast<size_t>(1));
|
||||
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("12"), 2), "MTI=");
|
||||
buf = s3fs_decode64("MTI=", 4, &len);
|
||||
ASSERT_BUFEQUALS(reinterpret_cast<const char *>(buf), len, "12", 2);
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(2));
|
||||
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("12"), 2), std::string("MTI="));
|
||||
buf = s3fs_decode64("MTI=", 4);
|
||||
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), "12", 2);
|
||||
ASSERT_EQUALS(buf.length(), static_cast<size_t>(2));
|
||||
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("123"), 3), "MTIz");
|
||||
buf = s3fs_decode64("MTIz", 4, &len);
|
||||
ASSERT_BUFEQUALS(reinterpret_cast<const char *>(buf), len, "123", 3);
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(3));
|
||||
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("123"), 3), std::string("MTIz"));
|
||||
buf = s3fs_decode64("MTIz", 4);
|
||||
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), "123", 3);
|
||||
ASSERT_EQUALS(buf.length(), static_cast<size_t>(3));
|
||||
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1234"), 4), "MTIzNA==");
|
||||
buf = s3fs_decode64("MTIzNA==", 8, &len);
|
||||
ASSERT_BUFEQUALS(reinterpret_cast<const char *>(buf), len, "1234", 4);
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(4));
|
||||
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1234"), 4), std::string("MTIzNA=="));
|
||||
buf = s3fs_decode64("MTIzNA==", 8);
|
||||
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), "1234", 4);
|
||||
ASSERT_EQUALS(buf.length(), static_cast<size_t>(4));
|
||||
|
||||
// TODO: invalid input
|
||||
}
|
||||
@ -150,6 +148,55 @@ void test_wtf8_encoding()
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(mixed)), mixed);
|
||||
}
|
||||
|
||||
void test_cr_encoding()
|
||||
{
|
||||
// base strings
|
||||
std::string base_no("STR");
|
||||
|
||||
std::string base_end_cr1("STR\r");
|
||||
std::string base_mid_cr1("STR\rSTR");
|
||||
std::string base_end_cr2("STR\r\r");
|
||||
std::string base_mid_cr2("STR\r\rSTR");
|
||||
|
||||
std::string base_end_per1("STR%");
|
||||
std::string base_mid_per1("STR%STR");
|
||||
std::string base_end_per2("STR%%");
|
||||
std::string base_mid_per2("STR%%STR");
|
||||
|
||||
std::string base_end_crlf1("STR\r\n");
|
||||
std::string base_mid_crlf1("STR\r\nSTR");
|
||||
std::string base_end_crlf2("STR\r\n\r\n");
|
||||
std::string base_mid_crlf2("STR\r\n\r\nSTR");
|
||||
|
||||
std::string base_end_crper1("STR%\r");
|
||||
std::string base_mid_crper1("STR%\rSTR");
|
||||
std::string base_end_crper2("STR%\r%\r");
|
||||
std::string base_mid_crper2("STR%\r%\rSTR");
|
||||
|
||||
// encode->decode->compare
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_no.c_str()).c_str()), base_no);
|
||||
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_end_cr1.c_str()).c_str()), base_end_cr1);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_mid_cr1.c_str()).c_str()), base_mid_cr1);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_end_cr2.c_str()).c_str()), base_end_cr2);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_mid_cr2.c_str()).c_str()), base_mid_cr2);
|
||||
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_end_per1.c_str()).c_str()), base_end_per1);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_mid_per1.c_str()).c_str()), base_mid_per1);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_end_per2.c_str()).c_str()), base_end_per2);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_mid_per2.c_str()).c_str()), base_mid_per2);
|
||||
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_end_crlf1.c_str()).c_str()), base_end_crlf1);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_mid_crlf1.c_str()).c_str()), base_mid_crlf1);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_end_crlf2.c_str()).c_str()), base_end_crlf2);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_mid_crlf2.c_str()).c_str()), base_mid_crlf2);
|
||||
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_end_crper1.c_str()).c_str()), base_end_crper1);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_mid_crper1.c_str()).c_str()), base_mid_crper1);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_end_crper2.c_str()).c_str()), base_end_crper2);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_mid_crper2.c_str()).c_str()), base_mid_crper2);
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
S3fsLog singletonLog;
|
||||
@ -158,6 +205,7 @@ int main(int argc, char *argv[])
|
||||
test_base64();
|
||||
test_strtoofft();
|
||||
test_wtf8_encoding();
|
||||
test_cr_encoding();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -32,7 +32,7 @@ template <typename T> void assert_equals(const T &x, const T &y, const char *fil
|
||||
if (x != y) {
|
||||
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
|
||||
std::cerr << std::endl;
|
||||
std::exit(1);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
@ -42,7 +42,7 @@ template <> void assert_equals(const std::string &x, const std::string &y, const
|
||||
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
|
||||
std::cerr << s3fs_hex_lower(reinterpret_cast<const unsigned char *>(x.c_str()), x.size()) << std::endl;
|
||||
std::cerr << s3fs_hex_lower(reinterpret_cast<const unsigned char *>(y.c_str()), y.size()) << std::endl;
|
||||
std::exit(1);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
@ -51,7 +51,7 @@ template <typename T> void assert_nequals(const T &x, const T &y, const char *fi
|
||||
{
|
||||
if (x == y) {
|
||||
std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
|
||||
std::exit(1);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
@ -61,29 +61,29 @@ template <> void assert_nequals(const std::string &x, const std::string &y, cons
|
||||
std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
|
||||
std::cerr << s3fs_hex_lower(reinterpret_cast<const unsigned char *>(x.c_str()), x.size()) << std::endl;
|
||||
std::cerr << s3fs_hex_lower(reinterpret_cast<const unsigned char *>(y.c_str()), y.size()) << std::endl;
|
||||
std::exit(1);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
void assert_strequals(const char *x, const char *y, const char *file, int line)
|
||||
{
|
||||
if(x == NULL && y == NULL){
|
||||
if(x == nullptr && y == nullptr){
|
||||
return;
|
||||
// cppcheck-suppress nullPointerRedundantCheck
|
||||
} else if(x == NULL || y == NULL || strcmp(x, y) != 0){
|
||||
} else if(x == nullptr || y == nullptr || strcmp(x, y) != 0){
|
||||
std::cerr << (x ? x : "null") << " != " << (y ? y : "null") << " at " << file << ":" << line << std::endl;
|
||||
std::exit(1);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
void assert_bufequals(const char *x, size_t len1, const char *y, size_t len2, const char *file, int line)
|
||||
{
|
||||
if(x == NULL && y == NULL){
|
||||
if(x == nullptr && y == nullptr){
|
||||
return;
|
||||
// cppcheck-suppress nullPointerRedundantCheck
|
||||
} else if(x == NULL || y == NULL || len1 != len2 || memcmp(x, y, len1) != 0){
|
||||
} else if(x == nullptr || y == nullptr || len1 != len2 || memcmp(x, y, len1) != 0){
|
||||
std::cerr << (x ? std::string(x, len1) : "null") << " != " << (y ? std::string(y, len2) : "null") << " at " << file << ":" << line << std::endl;
|
||||
std::exit(1);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
src/threadpoolman.cpp (new file, 264 lines)
@ -0,0 +1,264 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cerrno>
|
||||
#include <cstdint>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include "s3fs_logger.h"
|
||||
#include "threadpoolman.h"
|
||||
#include "autolock.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// ThreadPoolMan class variables
|
||||
//------------------------------------------------
|
||||
ThreadPoolMan* ThreadPoolMan::singleton = nullptr;
|
||||
|
||||
//------------------------------------------------
|
||||
// ThreadPoolMan class methods
|
||||
//------------------------------------------------
|
||||
bool ThreadPoolMan::Initialize(int count)
|
||||
{
|
||||
if(ThreadPoolMan::singleton){
|
||||
S3FS_PRN_WARN("Already singleton for Thread Manager is existed, then re-create it.");
|
||||
ThreadPoolMan::Destroy();
|
||||
}
|
||||
ThreadPoolMan::singleton = new ThreadPoolMan(count);
|
||||
return true;
|
||||
}
|
||||
|
||||
void ThreadPoolMan::Destroy()
|
||||
{
|
||||
if(ThreadPoolMan::singleton){
|
||||
delete ThreadPoolMan::singleton;
|
||||
ThreadPoolMan::singleton = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
bool ThreadPoolMan::Instruct(std::unique_ptr<thpoolman_param> pparam)
|
||||
{
|
||||
if(!ThreadPoolMan::singleton){
|
||||
S3FS_PRN_WARN("The singleton object is not initialized yet.");
|
||||
return false;
|
||||
}
|
||||
return ThreadPoolMan::singleton->SetInstruction(std::move(pparam));
|
||||
}
|
||||
|
||||
//
|
||||
// Thread worker
|
||||
//
|
||||
void* ThreadPoolMan::Worker(void* arg)
|
||||
{
|
||||
ThreadPoolMan* psingleton = static_cast<ThreadPoolMan*>(arg);
|
||||
|
||||
if(!psingleton){
|
||||
S3FS_PRN_ERR("The parameter for worker thread is invalid.");
|
||||
return reinterpret_cast<void*>(-EIO);
|
||||
}
|
||||
S3FS_PRN_INFO3("Start worker thread in ThreadPoolMan.");
|
||||
|
||||
while(!psingleton->IsExit()){
|
||||
// wait
|
||||
psingleton->thpoolman_sem.wait();
|
||||
|
||||
if(psingleton->IsExit()){
|
||||
break;
|
||||
}
|
||||
|
||||
// get instruction
|
||||
std::unique_ptr<thpoolman_param> pparam;
|
||||
{
|
||||
AutoLock auto_lock(&(psingleton->thread_list_lock));
|
||||
|
||||
if(!psingleton->instruction_list.empty()){
|
||||
pparam = std::move(psingleton->instruction_list.front());
|
||||
psingleton->instruction_list.pop_front();
|
||||
if(!pparam){
|
||||
S3FS_PRN_WARN("Got a semaphore, but the instruction is empty.");
|
||||
}
|
||||
}else{
|
||||
S3FS_PRN_WARN("Got a semaphore, but there is no instruction.");
|
||||
pparam = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
if(pparam){
|
||||
void* retval = pparam->pfunc(pparam->args);
|
||||
if(nullptr != retval){
|
||||
S3FS_PRN_WARN("The instruction function returned with somthign error code(%ld).", reinterpret_cast<long>(retval));
|
||||
}
|
||||
if(pparam->psem){
|
||||
pparam->psem->post();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
//------------------------------------------------
|
||||
// ThreadPoolMan methods
|
||||
//------------------------------------------------
|
||||
ThreadPoolMan::ThreadPoolMan(int count) : is_exit(false), thpoolman_sem(0), is_lock_init(false)
|
||||
{
|
||||
if(count < 1){
|
||||
S3FS_PRN_CRIT("Failed to creating singleton for Thread Manager, because thread count(%d) is under 1.", count);
|
||||
abort();
|
||||
}
|
||||
if(ThreadPoolMan::singleton){
|
||||
S3FS_PRN_CRIT("Already singleton for Thread Manager is existed.");
|
||||
abort();
|
||||
}
|
||||
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
#if S3FS_PTHREAD_ERRORCHECK
|
||||
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
|
||||
#endif
|
||||
|
||||
int result;
|
||||
if(0 != (result = pthread_mutex_init(&thread_list_lock, &attr))){
|
||||
S3FS_PRN_CRIT("failed to init thread_list_lock: %d", result);
|
||||
abort();
|
||||
}
|
||||
is_lock_init = true;
|
||||
|
||||
// create threads
|
||||
if(!StartThreads(count)){
|
||||
S3FS_PRN_ERR("Failed starting threads at initializing.");
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
ThreadPoolMan::~ThreadPoolMan()
|
||||
{
|
||||
StopThreads();
|
||||
|
||||
if(is_lock_init){
|
||||
int result;
|
||||
if(0 != (result = pthread_mutex_destroy(&thread_list_lock))){
|
||||
S3FS_PRN_CRIT("failed to destroy thread_list_lock: %d", result);
|
||||
abort();
|
||||
}
|
||||
is_lock_init = false;
|
||||
}
|
||||
}
|
||||
|
||||
bool ThreadPoolMan::IsExit() const
|
||||
{
|
||||
return is_exit;
|
||||
}
|
||||
|
||||
void ThreadPoolMan::SetExitFlag(bool exit_flag)
|
||||
{
|
||||
is_exit = exit_flag;
|
||||
}
|
||||
|
||||
bool ThreadPoolMan::StopThreads()
|
||||
{
|
||||
if(thread_list.empty()){
|
||||
S3FS_PRN_INFO("Any threads are running now, then nothing to do.");
|
||||
return true;
|
||||
}
|
||||
|
||||
// all threads to exit
|
||||
SetExitFlag(true);
|
||||
for(size_t waitcnt = thread_list.size(); 0 < waitcnt; --waitcnt){
|
||||
thpoolman_sem.post();
|
||||
}
|
||||
|
||||
// wait for threads exiting
|
||||
for(thread_list_t::const_iterator iter = thread_list.begin(); iter != thread_list.end(); ++iter){
|
||||
void* retval = nullptr;
|
||||
int result = pthread_join(*iter, &retval);
|
||||
if(result){
|
||||
S3FS_PRN_ERR("failed pthread_join - result(%d)", result);
|
||||
}else{
|
||||
S3FS_PRN_DBG("succeed pthread_join - return code(%ld)", reinterpret_cast<long>(retval));
|
||||
}
|
||||
}
|
||||
thread_list.clear();
|
||||
|
||||
// reset semaphore(to zero)
|
||||
while(thpoolman_sem.try_wait()){
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ThreadPoolMan::StartThreads(int count)
|
||||
{
|
||||
if(count < 1){
|
||||
S3FS_PRN_ERR("Failed to creating threads, because thread count(%d) is under 1.", count);
|
||||
return false;
|
||||
}
|
||||
|
||||
// stop all thread if they are running.
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(!StopThreads()){
|
||||
S3FS_PRN_ERR("Failed to stop existed threads.");
|
||||
return false;
|
||||
}
|
||||
|
||||
// create all threads
|
||||
SetExitFlag(false);
|
||||
for(int cnt = 0; cnt < count; ++cnt){
|
||||
// run thread
|
||||
pthread_t thread;
|
||||
int result;
|
||||
if(0 != (result = pthread_create(&thread, nullptr, ThreadPoolMan::Worker, static_cast<void*>(this)))){
|
||||
S3FS_PRN_ERR("failed pthread_create with return code(%d)", result);
|
||||
StopThreads(); // if possible, stop all threads
|
||||
return false;
|
||||
}
|
||||
thread_list.push_back(thread);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ThreadPoolMan::SetInstruction(std::unique_ptr<thpoolman_param> pparam)
|
||||
{
|
||||
if(!pparam){
|
||||
S3FS_PRN_ERR("The parameter value is nullptr.");
|
||||
return false;
|
||||
}
|
||||
|
||||
// set parameter to list
|
||||
{
|
||||
AutoLock auto_lock(&thread_list_lock);
|
||||
instruction_list.push_back(std::move(pparam));
|
||||
}
|
||||
|
||||
// run thread
|
||||
thpoolman_sem.post();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
src/threadpoolman.h (new file, 109 lines)
@ -0,0 +1,109 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_THREADPOOLMAN_H_
|
||||
#define S3FS_THREADPOOLMAN_H_
|
||||
|
||||
#include <atomic>
|
||||
#include <list>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "psemaphore.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// Typedefs for functions and structures
|
||||
//------------------------------------------------
|
||||
//
|
||||
// Prototype function
|
||||
//
|
||||
typedef void* (*thpoolman_worker)(void*); // same as start_routine for pthread_create function
|
||||
|
||||
//
|
||||
// Parameter structure
|
||||
//
|
||||
// [NOTE]
|
||||
// The args member is passed as the argument to the worker function.
|
||||
// The psem member may be nullptr. If it is not nullptr, its post() method is
|
||||
// called when the worker function finishes (a usage sketch follows this struct).
|
||||
//
|
||||
struct thpoolman_param
|
||||
{
|
||||
void* args;
|
||||
Semaphore* psem;
|
||||
thpoolman_worker pfunc;
|
||||
|
||||
thpoolman_param() : args(nullptr), psem(nullptr), pfunc(nullptr) {}
|
||||
};
|
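To make the note above concrete, here is a minimal sketch of how a caller could hand work to the pool through this structure; the function and variable names are illustrative, not part of the diff:

```cpp
#include <memory>
#include "psemaphore.h"
#include "threadpoolman.h"

// Worker with the thpoolman_worker signature.
static void* my_task(void* args)
{
    // ... use args ...
    return nullptr;   // a non-null return is logged as an error code by the pool
}

static void enqueue_example(void* task_args)
{
    Semaphore finished(0);

    std::unique_ptr<thpoolman_param> param(new thpoolman_param());
    param->args  = task_args;   // handed to my_task unchanged
    param->psem  = &finished;   // post()ed by the pool thread after my_task returns
    param->pfunc = my_task;

    if(ThreadPoolMan::Instruct(std::move(param))){
        finished.wait();        // block until a pooled thread has run my_task
    }
}
```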
||||
|
||||
typedef std::list<std::unique_ptr<thpoolman_param>> thpoolman_params_t;
|
||||
|
||||
typedef std::vector<pthread_t> thread_list_t;
|
||||
|
||||
//------------------------------------------------
|
||||
// Class ThreadPoolMan
|
||||
//------------------------------------------------
|
||||
class ThreadPoolMan
|
||||
{
|
||||
private:
|
||||
static ThreadPoolMan* singleton;
|
||||
|
||||
std::atomic<bool> is_exit;
|
||||
Semaphore thpoolman_sem;
|
||||
|
||||
bool is_lock_init;
|
||||
pthread_mutex_t thread_list_lock;
|
||||
thread_list_t thread_list;
|
||||
|
||||
thpoolman_params_t instruction_list;
|
||||
|
||||
private:
|
||||
static void* Worker(void* arg);
|
||||
|
||||
explicit ThreadPoolMan(int count = 1);
|
||||
~ThreadPoolMan();
|
||||
ThreadPoolMan(const ThreadPoolMan&) = delete;
|
||||
ThreadPoolMan(ThreadPoolMan&&) = delete;
|
||||
ThreadPoolMan& operator=(const ThreadPoolMan&) = delete;
|
||||
ThreadPoolMan& operator=(ThreadPoolMan&&) = delete;
|
||||
|
||||
bool IsExit() const;
|
||||
void SetExitFlag(bool exit_flag);
|
||||
|
||||
bool StopThreads();
|
||||
bool StartThreads(int count);
|
||||
bool SetInstruction(std::unique_ptr<thpoolman_param> pparam);
|
||||
|
||||
public:
|
||||
static bool Initialize(int count);
|
||||
static void Destroy();
|
||||
static bool Instruct(std::unique_ptr<thpoolman_param> pparam);
|
||||
};
|
||||
|
||||
#endif // S3FS_THREADPOOLMAN_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
src/types.h (259 lines)
@ -40,12 +40,6 @@
|
||||
#include <sys/xattr.h>
|
||||
#endif
|
||||
|
||||
#if __cplusplus < 201103L
|
||||
#define OPERATOR_EXPLICIT
|
||||
#else
|
||||
#define OPERATOR_EXPLICIT explicit
|
||||
#endif
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// xattrs_t
|
||||
//-------------------------------------------------------------------
|
||||
@ -54,118 +48,82 @@
|
||||
// This header is url encoded string which is json formatted.
|
||||
// x-amz-meta-xattr:urlencode({"xattr-1":"base64(value-1)","xattr-2":"base64(value-2)","xattr-3":"base64(value-3)"})
|
||||
//
|
||||
typedef struct xattr_value
|
||||
{
|
||||
unsigned char* pvalue;
|
||||
size_t length;
|
||||
|
||||
explicit xattr_value(unsigned char* pval = NULL, size_t len = 0) : pvalue(pval), length(len) {}
|
||||
~xattr_value()
|
||||
{
|
||||
delete[] pvalue;
|
||||
}
|
||||
}XATTRVAL, *PXATTRVAL;
|
||||
|
||||
typedef std::map<std::string, PXATTRVAL> xattrs_t;
|
||||
typedef std::map<std::string, std::string> xattrs_t;
|
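As a hedged sketch of the header format described in the comment above, assuming the map now holds raw attribute values; `build_xattr_header_sketch` is an illustrative helper, not part of the diff:

```cpp
#include <map>
#include <string>
#include "string_util.h"   // s3fs_base64, urlEncodeGeneral (as changed in this compare)

typedef std::map<std::string, std::string> xattrs_demo_t;

// Builds the value of the x-amz-meta-xattr header:
//   urlencode({"xattr-1":"base64(value-1)", ...})
std::string build_xattr_header_sketch(const xattrs_demo_t& xattrs)
{
    std::string json = "{";
    for(xattrs_demo_t::const_iterator it = xattrs.begin(); it != xattrs.end(); ++it){
        if(it != xattrs.begin()){
            json += ",";
        }
        json += "\"" + it->first + "\":\"";
        json += s3fs_base64(reinterpret_cast<const unsigned char*>(it->second.c_str()), it->second.length());
        json += "\"";
    }
    json += "}";
    return urlEncodeGeneral(json);
}
```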
||||
|
||||
//-------------------------------------------------------------------
|
||||
// acl_t
|
||||
//-------------------------------------------------------------------
|
||||
class acl_t{
|
||||
public:
|
||||
enum Value{
|
||||
PRIVATE,
|
||||
PUBLIC_READ,
|
||||
PUBLIC_READ_WRITE,
|
||||
AWS_EXEC_READ,
|
||||
AUTHENTICATED_READ,
|
||||
BUCKET_OWNER_READ,
|
||||
BUCKET_OWNER_FULL_CONTROL,
|
||||
LOG_DELIVERY_WRITE,
|
||||
UNKNOWN
|
||||
};
|
||||
|
||||
// cppcheck-suppress noExplicitConstructor
|
||||
acl_t(Value value) : value_(value) {}
|
||||
|
||||
operator Value() const { return value_; }
|
||||
|
||||
const char* str() const
|
||||
{
|
||||
switch(value_){
|
||||
case PRIVATE:
|
||||
return "private";
|
||||
case PUBLIC_READ:
|
||||
return "public-read";
|
||||
case PUBLIC_READ_WRITE:
|
||||
return "public-read-write";
|
||||
case AWS_EXEC_READ:
|
||||
return "aws-exec-read";
|
||||
case AUTHENTICATED_READ:
|
||||
return "authenticated-read";
|
||||
case BUCKET_OWNER_READ:
|
||||
return "bucket-owner-read";
|
||||
case BUCKET_OWNER_FULL_CONTROL:
|
||||
return "bucket-owner-full-control";
|
||||
case LOG_DELIVERY_WRITE:
|
||||
return "log-delivery-write";
|
||||
case UNKNOWN:
|
||||
return NULL;
|
||||
}
|
||||
abort();
|
||||
}
|
||||
|
||||
static acl_t from_str(const char *acl)
|
||||
{
|
||||
if(0 == strcmp(acl, "private")){
|
||||
return PRIVATE;
|
||||
}else if(0 == strcmp(acl, "public-read")){
|
||||
return PUBLIC_READ;
|
||||
}else if(0 == strcmp(acl, "public-read-write")){
|
||||
return PUBLIC_READ_WRITE;
|
||||
}else if(0 == strcmp(acl, "aws-exec-read")){
|
||||
return AWS_EXEC_READ;
|
||||
}else if(0 == strcmp(acl, "authenticated-read")){
|
||||
return AUTHENTICATED_READ;
|
||||
}else if(0 == strcmp(acl, "bucket-owner-read")){
|
||||
return BUCKET_OWNER_READ;
|
||||
}else if(0 == strcmp(acl, "bucket-owner-full-control")){
|
||||
return BUCKET_OWNER_FULL_CONTROL;
|
||||
}else if(0 == strcmp(acl, "log-delivery-write")){
|
||||
return LOG_DELIVERY_WRITE;
|
||||
}else{
|
||||
return UNKNOWN;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
OPERATOR_EXPLICIT operator bool();
|
||||
Value value_;
|
||||
enum class acl_t{
|
||||
PRIVATE,
|
||||
PUBLIC_READ,
|
||||
PUBLIC_READ_WRITE,
|
||||
AWS_EXEC_READ,
|
||||
AUTHENTICATED_READ,
|
||||
BUCKET_OWNER_READ,
|
||||
BUCKET_OWNER_FULL_CONTROL,
|
||||
LOG_DELIVERY_WRITE,
|
||||
UNKNOWN
|
||||
};
|
||||
|
||||
inline const char* str(acl_t value)
|
||||
{
|
||||
switch(value){
|
||||
case acl_t::PRIVATE:
|
||||
return "private";
|
||||
case acl_t::PUBLIC_READ:
|
||||
return "public-read";
|
||||
case acl_t::PUBLIC_READ_WRITE:
|
||||
return "public-read-write";
|
||||
case acl_t::AWS_EXEC_READ:
|
||||
return "aws-exec-read";
|
||||
case acl_t::AUTHENTICATED_READ:
|
||||
return "authenticated-read";
|
||||
case acl_t::BUCKET_OWNER_READ:
|
||||
return "bucket-owner-read";
|
||||
case acl_t::BUCKET_OWNER_FULL_CONTROL:
|
||||
return "bucket-owner-full-control";
|
||||
case acl_t::LOG_DELIVERY_WRITE:
|
||||
return "log-delivery-write";
|
||||
case acl_t::UNKNOWN:
|
||||
return nullptr;
|
||||
}
|
||||
abort();
|
||||
}
|
||||
|
||||
inline acl_t to_acl(const char *acl)
|
||||
{
|
||||
if(0 == strcmp(acl, "private")){
|
||||
return acl_t::PRIVATE;
|
||||
}else if(0 == strcmp(acl, "public-read")){
|
||||
return acl_t::PUBLIC_READ;
|
||||
}else if(0 == strcmp(acl, "public-read-write")){
|
||||
return acl_t::PUBLIC_READ_WRITE;
|
||||
}else if(0 == strcmp(acl, "aws-exec-read")){
|
||||
return acl_t::AWS_EXEC_READ;
|
||||
}else if(0 == strcmp(acl, "authenticated-read")){
|
||||
return acl_t::AUTHENTICATED_READ;
|
||||
}else if(0 == strcmp(acl, "bucket-owner-read")){
|
||||
return acl_t::BUCKET_OWNER_READ;
|
||||
}else if(0 == strcmp(acl, "bucket-owner-full-control")){
|
||||
return acl_t::BUCKET_OWNER_FULL_CONTROL;
|
||||
}else if(0 == strcmp(acl, "log-delivery-write")){
|
||||
return acl_t::LOG_DELIVERY_WRITE;
|
||||
}else{
|
||||
return acl_t::UNKNOWN;
|
||||
}
|
||||
}
|
||||

//-------------------------------------------------------------------
// sse_type_t
//-------------------------------------------------------------------
class sse_type_t{
public:
enum Value{
SSE_DISABLE = 0, // not use server side encrypting
SSE_S3, // server side encrypting by S3 key
SSE_C, // server side encrypting by custom key
SSE_KMS // server side encrypting by kms id
};

// cppcheck-suppress noExplicitConstructor
sse_type_t(Value value) : value_(value) {}

operator Value() const { return value_; }

private:
//OPERATOR_EXPLICIT operator bool();
Value value_;
enum class sse_type_t{
SSE_DISABLE = 0, // not use server side encrypting
SSE_S3, // server side encrypting by S3 key
SSE_C, // server side encrypting by custom key
SSE_KMS // server side encrypting by kms id
};

enum signature_type_t {
enum class signature_type_t {
V2_ONLY,
V4_ONLY,
V2_OR_V4
@@ -182,7 +140,7 @@ struct etagpair
std::string etag; // expected etag value
int part_num; // part number

etagpair(const char* petag = NULL, int part = -1) : etag(petag ? petag : ""), part_num(part) {}
explicit etagpair(const char* petag = nullptr, int part = -1) : etag(petag ? petag : ""), part_num(part) {}

~etagpair()
{
@@ -196,8 +154,31 @@ struct etagpair
}
};

// Requires pointer stability and thus must be a list not a vector
typedef std::list<etagpair> etaglist_t;

struct petagpool
{
// Requires pointer stability and thus must be a list not a vector
std::list<etagpair> petaglist;

~petagpool()
{
clear();
}

void clear()
{
petaglist.clear();
}

etagpair* add(const etagpair& etag_entity)
{
petaglist.push_back(etag_entity);
return &petaglist.back();
}
};
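The "pointer stability" comments above are why these containers stay std::list while most others in this change move to std::vector: add() hands out raw pointers into the container, and a std::vector may relocate its elements when it grows. A minimal illustrative sketch (names are only for illustration):

    #include <list>
    #include <vector>

    static void pointer_stability_sketch()
    {
        std::list<int> stable;
        stable.push_back(1);
        int* p = &stable.back();
        for(int i = 0; i < 1000; ++i){
            stable.push_back(i);   // list growth never moves existing nodes
        }
        *p = 42;                   // still points at the first element

        std::vector<int> unstable;
        unstable.push_back(1);
        int* q = &unstable.back();
        unstable.push_back(2);     // may reallocate and move the elements
        (void)q;                   // dereferencing q now would be undefined behavior after a reallocation
    }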

//
// Each part information for Multipart upload
//
@@ -211,7 +192,7 @@ struct filepart
bool is_copy; // whether is copy multipart
etagpair* petag; // use only parallel upload

filepart(bool is_uploaded = false, int _fd = -1, off_t part_start = 0, off_t part_size = -1, bool is_copy_part = false, etagpair* petagpair = NULL) : uploaded(false), fd(_fd), startpos(part_start), size(part_size), is_copy(is_copy_part), petag(petagpair) {}
explicit filepart(bool is_uploaded = false, int _fd = -1, off_t part_start = 0, off_t part_size = -1, bool is_copy_part = false, etagpair* petagpair = nullptr) : uploaded(false), fd(_fd), startpos(part_start), size(part_size), is_copy(is_copy_part), petag(petagpair) {}

~filepart()
{
@@ -226,7 +207,7 @@ struct filepart
startpos = 0;
size = -1;
is_copy = false;
petag = NULL;
petag = nullptr;
}

void add_etag_list(etaglist_t& list, int partnum = -1)
@@ -234,7 +215,7 @@ struct filepart
if(-1 == partnum){
partnum = static_cast<int>(list.size()) + 1;
}
list.push_back(etagpair(NULL, partnum));
list.push_back(etagpair(nullptr, partnum));
petag = &list.back();
}

@@ -243,7 +224,7 @@ struct filepart
petag = petagobj;
}

int get_part_number()
int get_part_number() const
{
if(!petag){
return -1;
@@ -252,7 +233,7 @@ struct filepart
}
};

typedef std::list<filepart> filepart_list_t;
typedef std::vector<filepart> filepart_list_t;

//
// Each part information for Untreated parts
@@ -263,7 +244,7 @@ struct untreatedpart
off_t size; // number of untreated bytes
long untreated_tag; // untreated part tag

untreatedpart(off_t part_start = 0, off_t part_size = 0, long part_untreated_tag = 0) : start(part_start), size(part_size), untreated_tag(part_untreated_tag)
explicit untreatedpart(off_t part_start = 0, off_t part_size = 0, long part_untreated_tag = 0) : start(part_start), size(part_size), untreated_tag(part_untreated_tag)
{
if(part_start < 0 || part_size <= 0){
clear(); // wrong parameter, so clear value.
@@ -282,9 +263,13 @@ struct untreatedpart
untreated_tag = 0;
}

// [NOTE]
// Check if the areas overlap
// However, even if the areas do not overlap, this method returns true if areas are adjacent.
//
bool check_overlap(off_t chk_start, off_t chk_size)
{
if(chk_start < 0 || chk_size <= 0 || (chk_start + chk_size) < start || (start + size) < chk_start){
if(chk_start < 0 || chk_size <= 0 || start < 0 || size <= 0 || (chk_start + chk_size) < start || (start + size) < chk_start){
return false;
}
return true;
@@ -306,7 +291,47 @@ struct untreatedpart
}
};

typedef std::list<untreatedpart> untreated_list_t;
typedef std::vector<untreatedpart> untreated_list_t;

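The [NOTE] on check_overlap() above is the key subtlety: ranges that merely touch are reported as overlapping, so adjacent untreated areas can be merged. A small sketch of the expected results (offsets chosen only for the example):

    #include <cassert>

    static void overlap_sketch()
    {
        untreatedpart part(100, 50);             // covers bytes [100, 150)
        assert( part.check_overlap(120, 10));    // contained             -> true
        assert( part.check_overlap(150, 10));    // adjacent at the end   -> true
        assert( part.check_overlap( 90, 10));    // adjacent at the front -> true
        assert(!part.check_overlap(151, 10));    // one-byte gap          -> false
    }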
//
// Information on each part of multipart upload
//
struct mp_part
{
off_t start;
off_t size;
int part_num; // Set only for information to upload

explicit mp_part(off_t set_start = 0, off_t set_size = 0, int part = 0) : start(set_start), size(set_size), part_num(part) {}
};

typedef std::vector<struct mp_part> mp_part_list_t;

inline off_t total_mp_part_list(const mp_part_list_t& mplist)
{
off_t size = 0;
for(mp_part_list_t::const_iterator iter = mplist.begin(); iter != mplist.end(); ++iter){
size += iter->size;
}
return size;
}

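A short usage sketch of total_mp_part_list() with three planned parts (sizes chosen only for the example):

    #include <cassert>

    static void total_size_sketch()
    {
        mp_part_list_t parts;
        parts.emplace_back(0,                10 * 1024 * 1024, 1);   // part 1: 10 MiB at offset 0
        parts.emplace_back(10 * 1024 * 1024, 10 * 1024 * 1024, 2);   // part 2: next 10 MiB
        parts.emplace_back(20 * 1024 * 1024, 10 * 1024 * 1024, 3);   // part 3: last 10 MiB
        assert(30 * 1024 * 1024 == total_mp_part_list(parts));
    }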
//
// Rename directory struct
//
struct mvnode
{
mvnode(std::string old_path, std::string new_path, bool is_dir, bool is_normdir)
: old_path(std::move(old_path))
, new_path(std::move(new_path))
, is_dir(is_dir)
, is_normdir(is_normdir)
{}
std::string old_path;
std::string new_path;
bool is_dir;
bool is_normdir;
};

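mvnode takes its two paths by value and moves them into the members, so a caller pays one copy for an lvalue and no copy at all for a temporary. A brief sketch:

    #include <string>
    #include <utility>

    static void mvnode_sketch()
    {
        std::string oldpath = "/dir_a/";
        mvnode keep_copy(oldpath, "/dir_b/", true, false);             // oldpath is copied once
        mvnode take_over(std::move(oldpath), "/dir_c/", true, false);  // oldpath's buffer is moved out
    }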
//-------------------------------------------------------------------
// mimes_t
@@ -322,7 +347,7 @@ typedef std::map<std::string, std::string, case_insensitive_compare_func> mimes_
//-------------------------------------------------------------------
// Typedefs specialized for use
//-------------------------------------------------------------------
typedef std::list<std::string> readline_t;
typedef std::vector<std::string> readline_t;
typedef std::map<std::string, std::string> kvmap_t;
typedef std::map<std::string, kvmap_t> bucketkvmap_t;

@@ -31,10 +31,25 @@ testdir = test

noinst_PROGRAMS = \
junk_data \
write_multiblock
write_multiblock \
mknod_test \
truncate_read_file \
cr_filename

junk_data_SOURCES = junk_data.c
write_multiblock_SOURCES = write_multiblock.cc
junk_data_SOURCES = junk_data.cc
write_multiblock_SOURCES = write_multiblock.cc
mknod_test_SOURCES = mknod_test.cc
truncate_read_file_SOURCES = truncate_read_file.cc
cr_filename_SOURCES = cr_filename.cc

clang-tidy:
clang-tidy \
$(junk_data_SOURCES) \
$(write_multiblock_SOURCES) \
$(mknod_test_SOURCES) \
$(truncate_read_file_SOURCES) \
$(cr_filename_SOURCES) \
-- $(DEPS_CFLAGS) $(CPPFLAGS)

#
# Local variables:

test/compile_all_targets.sh (new executable file, 63 lines)
@@ -0,0 +1,63 @@
#!/bin/bash
#
# s3fs - FUSE-based file system backed by Amazon S3
#
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#

set -o errexit
set -o nounset
set -o pipefail

COMMON_FLAGS='-O -Wall -Werror'

make clean
CXXFLAGS="$COMMON_FLAGS" ./configure --with-gnutls
make --jobs "$(nproc)"

make clean
CXXFLAGS="$COMMON_FLAGS" ./configure --with-gnutls --with-nettle
make --jobs "$(nproc)"

make clean
CXXFLAGS="$COMMON_FLAGS" ./configure --with-nss
make --jobs "$(nproc)"

make clean
CXXFLAGS="$COMMON_FLAGS" ./configure --with-openssl
make --jobs "$(nproc)"

make clean
CXXFLAGS="$COMMON_FLAGS -std=c++23" ./configure
make --jobs "$(nproc)"

make clean
CXXFLAGS="$COMMON_FLAGS -m32" ./configure
make --jobs "$(nproc)"

make clean
CXX=clang++ CXXFLAGS="$COMMON_FLAGS -Wshorten-64-to-32" ./configure
make --jobs "$(nproc)"

#
# Local variables:
# tab-width: 4
# c-basic-offset: 4
# End:
# vim600: expandtab sw=4 ts=4 fdm=marker
# vim<600: expandtab sw=4 ts=4
#
test/cr_filename.cc (new file, 77 lines)
@@ -0,0 +1,77 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright(C) 2021 Andrew Gaul <andrew@gaul.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <cstdio>
#include <cstdlib>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

// [NOTE]
// This program creates an empty file whose name ends with a carriage
// return (CR) character, checks it with stat(), and removes it.
// It is used to test handling of file names that contain a CR.
//
int main(int argc, const char *argv[])
{
if(argc != 2){
fprintf(stderr, "[ERROR] Wrong parameters\n");
fprintf(stdout, "[Usage] cr_filename <base file path>\n");
exit(EXIT_FAILURE);
}

int fd;
char filepath[4096];
snprintf(filepath, sizeof(filepath), "%s\r", argv[1]);
filepath[sizeof(filepath) - 1] = '\0'; // for safety

// create empty file
if(-1 == (fd = open(filepath, O_CREAT|O_RDWR, 0644))){
fprintf(stderr, "[ERROR] Could not open file(%s)\n", filepath);
exit(EXIT_FAILURE);
}
close(fd);

// stat
struct stat buf;
if(0 != stat(filepath, &buf)){
fprintf(stderr, "[ERROR] Could not get stat for file(%s)\n", filepath);
exit(EXIT_FAILURE);
}

// remove file
if(0 != unlink(filepath)){
fprintf(stderr, "[ERROR] Could not remove file(%s)\n", filepath);
exit(EXIT_FAILURE);
}

exit(EXIT_SUCCESS);
}

/*
 * Local variables:
 * tab-width: 4
 * c-basic-offset: 4
 * End:
 * vim600: expandtab sw=4 ts=4 fdm=marker
 * vim<600: expandtab sw=4 ts=4
 */
@@ -81,29 +81,29 @@ while read -r line; do
if [ "${prev_line_type}" -eq 1 ]; then
if [ "${number_type[1]}" -eq 2 ]; then
# if passed, cut s3fs information messages
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
elif [ "${number_type[1]}" -eq 3 ]; then
# if failed, print all
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+\%'
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%'
else
# there is start keyword but not end keyword, so print all
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+\%'
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%'
fi
elif [ "${prev_line_type}" -eq 2 ] || [ "${prev_line_type}" -eq 3 ]; then
if [ "${number_type[1]}" -eq 2 ] || [ "${number_type[1]}" -eq 3 ]; then
# previous is end of chmpx, but this type is end of chmpx without start keyword. then print all
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+\%'
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%'
else
# this area is not from start to end, cut s3fs information messages
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
fi
else
if [ "${number_type[1]}" -eq 2 ] || [ "${number_type[1]}" -eq 3 ]; then
# previous is normal, but this type is end of chmpx without start keyword. then print all
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+\%'
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%'
else
# this area is normal, cut s3fs information messages
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
fi
fi
if [ "${number_type[1]}" -eq 3 ]; then
@@ -121,9 +121,9 @@ file_line_cnt=$(wc -l "${SUITELOG}" | awk '{print $1}')
tail_line_cnt=$((file_line_cnt - prev_line_number))

if [ "${prev_line_type}" -eq 1 ]; then
tail "-${tail_line_cnt}" "${SUITELOG}" | grep -v -e '[0-9]\+\%'
tail "-${tail_line_cnt}" "${SUITELOG}" | grep -v -e '[0-9]\+%'
else
tail "-${tail_line_cnt}" "${SUITELOG}" | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
tail "-${tail_line_cnt}" "${SUITELOG}" | grep -v -e '[0-9]\+%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
fi

#

@@ -34,6 +34,8 @@
# S3_ENDPOINT="us-east-1" Specify region
# TMPDIR="/var/tmp" Set to use a temporary directory different
# from /var/tmp
# CHAOS_HTTP_PROXY=1 Test proxy(environment) by CHAOS HTTP PROXY
# CHAOS_HTTP_PROXY_OPT=1 Test proxy(option) by CHAOS HTTP PROXY
#
# Example of running against Amazon S3 using a bucket named "bucket":
#
@@ -66,7 +68,15 @@ set -o pipefail
S3FS=../src/s3fs

# Allow these defaulted values to be overridden
: "${S3_URL:="https://127.0.0.1:8080"}"
#
# [NOTE]
# CHAOS HTTP PROXY does not support HTTPS.
#
if [ -z "${CHAOS_HTTP_PROXY}" ] && [ -z "${CHAOS_HTTP_PROXY_OPT}" ]; then
: "${S3_URL:="https://127.0.0.1:8080"}"
else
: "${S3_URL:="http://127.0.0.1:8080"}"
fi
: "${S3_ENDPOINT:="us-east-1"}"
: "${S3FS_CREDENTIALS_FILE:="passwd-s3fs"}"
: "${TEST_BUCKET_1:="s3fs-integration-test"}"
@@ -78,7 +88,7 @@ TEST_SCRIPT_DIR=$(pwd)
export TEST_SCRIPT_DIR
export TEST_BUCKET_MOUNT_POINT_1=${TEST_BUCKET_1}

S3PROXY_VERSION="1.9.0"
S3PROXY_VERSION="2.0.0"
S3PROXY_BINARY="${S3PROXY_BINARY-"s3proxy-${S3PROXY_VERSION}"}"

CHAOS_HTTP_PROXY_VERSION="1.1.0"
@@ -135,7 +145,11 @@ function start_s3proxy {
if [ -n "${PUBLIC}" ]; then
local S3PROXY_CONFIG="s3proxy-noauth.conf"
else
local S3PROXY_CONFIG="s3proxy.conf"
if [ -z "${CHAOS_HTTP_PROXY}" ] && [ -z "${CHAOS_HTTP_PROXY_OPT}" ]; then
local S3PROXY_CONFIG="s3proxy.conf"
else
local S3PROXY_CONFIG="s3proxy_http.conf"
fi
fi

if [ -n "${S3PROXY_BINARY}" ]
@@ -147,9 +161,18 @@ function start_s3proxy {
fi

# generate self-signed SSL certificate
rm -f /tmp/keystore.jks /tmp/keystore.pem
echo -e 'password\npassword\n\n\n\n\n\n\nyes' | keytool -genkey -keystore /tmp/keystore.jks -keyalg RSA -keysize 2048 -validity 365 -ext SAN=IP:127.0.0.1
echo password | keytool -exportcert -keystore /tmp/keystore.jks -rfc -file /tmp/keystore.pem
#
# [NOTE]
# The PROXY test is HTTP only, so do not create CA certificates.
#
if [ -z "${CHAOS_HTTP_PROXY}" ] && [ -z "${CHAOS_HTTP_PROXY_OPT}" ]; then
S3PROXY_CACERT_FILE="/tmp/keystore.pem"
rm -f /tmp/keystore.jks "${S3PROXY_CACERT_FILE}"
printf 'password\npassword\n\n\n\n\n\n\ny' | keytool -genkey -keystore /tmp/keystore.jks -keyalg RSA -keysize 2048 -validity 365 -ext SAN=IP:127.0.0.1
echo password | keytool -exportcert -keystore /tmp/keystore.jks -rfc -file "${S3PROXY_CACERT_FILE}"
else
S3PROXY_CACERT_FILE=""
fi

"${STDBUF_BIN}" -oL -eL java -jar "${S3PROXY_BINARY}" --properties "${S3PROXY_CONFIG}" &
S3PROXY_PID=$!
@@ -158,7 +181,7 @@ function start_s3proxy {
wait_for_port 8080
fi

if [ -n "${CHAOS_HTTP_PROXY}" ]; then
if [ -n "${CHAOS_HTTP_PROXY}" ] || [ -n "${CHAOS_HTTP_PROXY_OPT}" ]; then
if [ ! -e "${CHAOS_HTTP_PROXY_BINARY}" ]; then
curl "https://github.com/bouncestorage/chaos-http-proxy/releases/download/chaos-http-proxy-${CHAOS_HTTP_PROXY_VERSION}/chaos-http-proxy" \
--fail --location --silent --output "${CHAOS_HTTP_PROXY_BINARY}"
@@ -206,25 +229,50 @@ function start_s3fs {
fi

# On OSX only, we need to specify the direct_io and auto_cache flag.
#
# And Turn off creation and reference of spotlight index.
# (Leaving spotlight ON will result in a lot of wasted requests,
# which will affect test execution time)
#
if [ "$(uname)" = "Darwin" ]; then
local DIRECT_IO_OPT="-o direct_io -o auto_cache"

# disable spotlight
sudo mdutil -a -i off
else
local DIRECT_IO_OPT=""
fi

# Set environment variables or options for proxy.
# And the PROXY test is HTTP only and does not set CA certificates.
#
if [ -n "${CHAOS_HTTP_PROXY}" ]; then
export http_proxy="127.0.0.1:1080"
S3FS_HTTP_PROXY_OPT=""
elif [ -n "${CHAOS_HTTP_PROXY_OPT}" ]; then
S3FS_HTTP_PROXY_OPT="-o proxy=http://127.0.0.1:1080"
else
S3FS_HTTP_PROXY_OPT=""
fi

# [NOTE]
# On macos, running s3fs via stdbuf will result in no response.
# Therefore, when it is macos, it is not executed via stdbuf.
# This patch may be temporary, but no other method has been found at this time.
# For macos fuse-t, we need to specify the "noattrcache" option to
# disable NFS caching.
#
if [ "$(uname)" = "Darwin" ]; then
local VIA_STDBUF_CMDLINE=""
local FUSE_T_ATTRCACHE_OPT="-o noattrcache"
else
local VIA_STDBUF_CMDLINE="${STDBUF_BIN} -oL -eL"
local FUSE_T_ATTRCACHE_OPT=""
fi

# [NOTE]
# On macOS we may get a VERIFY error for the self-signed certificate used by s3proxy.
# We can specify NO_CHECK_CERT=1 to avoid this.
#
if [ -n "${NO_CHECK_CERT}" ] && [ "${NO_CHECK_CERT}" -eq 1 ]; then
local NO_CHECK_CERT_OPT="-o no_check_certificate"
else
local NO_CHECK_CERT_OPT=""
fi

# Common s3fs options:
@@ -247,8 +295,8 @@ function start_s3fs {
# shellcheck disable=SC2086
(
set -x
CURL_CA_BUNDLE=/tmp/keystore.pem \
${VIA_STDBUF_CMDLINE} \
CURL_CA_BUNDLE="${S3PROXY_CACERT_FILE}" \
${STDBUF_BIN} -oL -eL \
${VALGRIND_EXEC} \
${S3FS} \
${TEST_BUCKET_1} \
@@ -260,6 +308,9 @@ function start_s3fs {
-o enable_unsigned_payload \
${AUTH_OPT} \
${DIRECT_IO_OPT} \
${S3FS_HTTP_PROXY_OPT} \
${NO_CHECK_CERT_OPT} \
${FUSE_T_ATTRCACHE_OPT} \
-o stat_cache_expire=1 \
-o stat_cache_interval_expire=1 \
-o dbglevel="${DBGLEVEL:=info}" \
@@ -277,15 +328,15 @@ function start_s3fs {
if [ "$(uname)" = "Darwin" ]; then
local TRYCOUNT=0
while [ "${TRYCOUNT}" -le "${RETRIES:=20}" ]; do
df | grep -q "${TEST_BUCKET_MOUNT_POINT_1}"
rc=$?
if [ "${rc}" -eq 0 ]; then
_DF_RESULT=$(df 2>/dev/null)
if echo "${_DF_RESULT}" | grep -q "${TEST_BUCKET_MOUNT_POINT_1}"; then
break;
fi
sleep 1
TRYCOUNT=$((TRYCOUNT + 1))
done
if [ "${rc}" -ne 0 ]; then
if [ "${TRYCOUNT}" -gt "${RETRIES}" ]; then
echo "Waited ${TRYCOUNT} seconds, but it could not be mounted."
exit 1
fi
else

File diff suppressed because it is too large
@@ -20,21 +20,20 @@

// Generate junk data at high speed. An alternative to dd if=/dev/urandom.

#include <stdio.h>
#include <stdlib.h>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

int main(int argc, char *argv[])
int main(int argc, const char *argv[])
{
if (argc != 2) {
return 1;
}
long long count = strtoull(argv[1], NULL, 10);
uint64_t count = strtoull(argv[1], nullptr, 10);
char buf[128 * 1024];
long long i;
for (i = 0; i < count; i += sizeof(buf)) {
long long j;
for (j = 0; j < sizeof(buf) / sizeof(i); ++j) {
*((long long *)buf + j) = i / sizeof(i) + j;
for (uint64_t i = 0; i < count; i += sizeof(buf)) {
for (uint64_t j = 0; j < sizeof(buf) / sizeof(i); ++j) {
*(reinterpret_cast<uint64_t *>(buf) + j) = i / sizeof(i) + j;
}
fwrite(buf, 1, sizeof(buf) > count - i ? count - i : sizeof(buf), stdout);
}
test/mknod_test.cc (new file, 184 lines)
@@ -0,0 +1,184 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright(C) 2021 Andrew Gaul <andrew@gaul.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#ifndef __APPLE__
#include <sys/sysmacros.h>
#endif

//---------------------------------------------------------
// Const
//---------------------------------------------------------
static constexpr char usage_string[] = "Usage : \"mknod_test <base file path>\"";

static constexpr char str_mode_reg[] = "REGULAR";
static constexpr char str_mode_chr[] = "CHARACTER";
static constexpr char str_mode_blk[] = "BLOCK";
static constexpr char str_mode_fifo[] = "FIFO";
static constexpr char str_mode_sock[] = "SOCK";

static constexpr char str_ext_reg[] = "reg";
static constexpr char str_ext_chr[] = "chr";
static constexpr char str_ext_blk[] = "blk";
static constexpr char str_ext_fifo[] = "fifo";
static constexpr char str_ext_sock[] = "sock";

// [NOTE]
// It would be nice if PATH_MAX could be used as is, but since there are
// issues using it on Linux and we also must support macos, this simple
// test program defines a fixed value for simplicity.
//
static constexpr size_t S3FS_TEST_PATH_MAX = 255;
static constexpr size_t MAX_BASE_PATH_LENGTH = S3FS_TEST_PATH_MAX - 5;

//---------------------------------------------------------
// Test function
//---------------------------------------------------------
bool TestMknod(const char* basepath, mode_t mode)
{
if(!basepath){
fprintf(stderr, "[ERROR] Called function with wrong basepath argument.\n");
return false;
}

const char* str_mode;
dev_t dev;
char filepath[S3FS_TEST_PATH_MAX];
switch(mode){
case S_IFREG:
str_mode = str_mode_reg;
dev = 0;
snprintf(filepath, sizeof(filepath), "%s.%s", basepath, str_ext_reg);
filepath[S3FS_TEST_PATH_MAX - 1] = '\0'; // for safety
break;
case S_IFCHR:
str_mode = str_mode_chr;
dev = makedev(0, 0);
snprintf(filepath, sizeof(filepath), "%s.%s", basepath, str_ext_chr);
filepath[S3FS_TEST_PATH_MAX - 1] = '\0'; // for safety
break;
case S_IFBLK:
str_mode = str_mode_blk;
dev = makedev((unsigned int)(259), 0); // temporary value
snprintf(filepath, sizeof(filepath), "%s.%s", basepath, str_ext_blk);
filepath[S3FS_TEST_PATH_MAX - 1] = '\0'; // for safety
break;
case S_IFIFO:
str_mode = str_mode_fifo;
dev = 0;
snprintf(filepath, sizeof(filepath), "%s.%s", basepath, str_ext_fifo);
filepath[S3FS_TEST_PATH_MAX - 1] = '\0'; // for safety
break;
case S_IFSOCK:
str_mode = str_mode_sock;
dev = 0;
snprintf(filepath, sizeof(filepath), "%s.%s", basepath, str_ext_sock);
filepath[S3FS_TEST_PATH_MAX - 1] = '\0'; // for safety
break;
default:
fprintf(stderr, "[ERROR] Called function with wrong mode argument.\n");
return false;
}

//
// Create
//
if(0 != mknod(filepath, mode | S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH, dev)){
fprintf(stderr, "[ERROR] Could not create %s file(%s) : errno = %d\n", str_mode, filepath, errno);
return false;
}

//
// Check
//
struct stat st;
if(0 != stat(filepath, &st)){
fprintf(stderr, "[ERROR] Could not get stat from %s file(%s) : errno = %d\n", str_mode, filepath, errno);
return false;
}
if(mode != (st.st_mode & S_IFMT)){
fprintf(stderr, "[ERROR] Created %s file(%s) does not have 0%o stat\n", str_mode, filepath, mode);
return false;
}

//
// Remove
//
if(0 != unlink(filepath)){
fprintf(stderr, "[WARNING] Could not remove %s file(%s) : errno = %d\n", str_mode, filepath, mode);
}
return true;
}

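For reference, the dev_t argument only matters for the device cases above: makedev() packs a major/minor pair and major()/minor() unpack it (on Linux they live in <sys/sysmacros.h>, on macOS in <sys/types.h>, which is what the #ifndef __APPLE__ include handles). The 259 used for S_IFBLK is just a placeholder value. A tiny sketch:

    #include <cassert>
    #include <sys/types.h>
    #ifndef __APPLE__
    #include <sys/sysmacros.h>
    #endif

    static void devnum_sketch()
    {
        dev_t dev = makedev(259, 0);   // same placeholder as TestMknod's S_IFBLK case
        assert(259 == major(dev));
        assert(0 == minor(dev));
    }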
//---------------------------------------------------------
// Main
//---------------------------------------------------------
int main(int argc, const char *argv[])
{
// Parse parameters
if(2 != argc){
fprintf(stderr, "[ERROR] No parameter is specified.\n");
fprintf(stderr, "%s\n", usage_string);
exit(EXIT_FAILURE);
}
if(0 == strcmp("-h", argv[1]) || 0 == strcmp("--help", argv[1])){
fprintf(stdout, "%s\n", usage_string);
exit(EXIT_SUCCESS);
}
if(MAX_BASE_PATH_LENGTH < strlen(argv[1])){
fprintf(stderr, "[ERROR] Base file path is too long, it must be less than %zu\n", MAX_BASE_PATH_LENGTH);
exit(EXIT_FAILURE);
}

// Test
//
// [NOTE]
// Privilege is required to execute S_IFBLK.
//
if(0 != geteuid()){
fprintf(stderr, "[WARNING] Skipping mknod(S_IFBLK) due to missing root privileges.\n");
}
if(!TestMknod(argv[1], S_IFREG) ||
!TestMknod(argv[1], S_IFCHR) ||
!TestMknod(argv[1], S_IFIFO) ||
!TestMknod(argv[1], S_IFSOCK) ||
(0 == geteuid() && !TestMknod(argv[1], S_IFBLK)))
{
exit(EXIT_FAILURE);
}

exit(EXIT_SUCCESS);
}

/*
 * Local variables:
 * tab-width: 4
 * c-basic-offset: 4
 * End:
 * vim600: expandtab sw=4 ts=4 fdm=marker
 * vim<600: expandtab sw=4 ts=4
 */
@@ -29,32 +29,39 @@ COMMON_FLAGS="-g -O0 -Wno-cpp"
# run tests with libstdc++ debug mode, https://gcc.gnu.org/onlinedocs/libstdc++/manual/debug_mode.html
make clean
./configure CXXFLAGS="$COMMON_FLAGS -D_GLIBCXX_DEBUG"
make
DBGLEVEL=debug make check -C test/
make --jobs="$(nproc)"
ALL_TESTS=1 make check -C test/

# run tests under AddressSanitizer, https://clang.llvm.org/docs/AddressSanitizer.html
make clean
./configure CXX=clang++ CXXFLAGS="$COMMON_FLAGS -fsanitize=address -fsanitize-address-use-after-scope"
make
ASAN_OPTIONS='detect_leaks=1,detect_stack_use_after_return=1' make check -C test/
make --jobs="$(nproc)"
ALL_TESTS=1 ASAN_OPTIONS='detect_leaks=1,detect_stack_use_after_return=1' make check -C test/

# run tests under MemorySanitizer, https://clang.llvm.org/docs/MemorySanitizer.html
# TODO: this requires a custom libc++
#make clean
#./configure CXX=clang++ CXXFLAGS="$COMMON_FLAGS -fsanitize=memory"
#make
#make check -C test/
#make --jobs="$(nproc)"
#ALL_TESTS=1 make check -C test/

# run tests under ThreadSanitizer, https://clang.llvm.org/docs/ThreadSanitizer.html
make clean
./configure CXX=clang++ CXXFLAGS="$COMMON_FLAGS -fsanitize=thread"
make
TSAN_OPTIONS='halt_on_error=1' make check -C test/
make --jobs="$(nproc)"
ALL_TESTS=1 TSAN_OPTIONS='halt_on_error=1' make check -C test/

# run tests under UndefinedBehaviorSanitizer, https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html
make clean
./configure CXX=clang++ CXXFLAGS="$COMMON_FLAGS -fsanitize=undefined,implicit-conversion,local-bounds,unsigned-integer-overflow"
make
make check -C test/
make --jobs="$(nproc)"
ALL_TESTS=1 make check -C test/

# run tests with Valgrind
make clean
./configure CXXFLAGS="$COMMON_FLAGS"
make --jobs="$(nproc)"
ALL_TESTS=1 RETRIES=100 VALGRIND="--leak-check=full --error-exitcode=1" S3_URL=http://127.0.0.1:8081 make check -C test/

#
# Local variables:

@@ -1,3 +1,4 @@
s3proxy.endpoint=http://127.0.0.1:8081
s3proxy.secure-endpoint=https://127.0.0.1:8080
s3proxy.authorization=aws-v2-or-v4
s3proxy.identity=local-identity

test/s3proxy_http.conf (new file, 8 lines)
@@ -0,0 +1,8 @@
s3proxy.endpoint=http://127.0.0.1:8080
s3proxy.authorization=aws-v2-or-v4
s3proxy.identity=local-identity
s3proxy.credential=local-credential

jclouds.provider=transient
jclouds.identity=remote-identity
jclouds.credential=remote-credential
@@ -90,7 +90,7 @@ find "${STATS_CDIR}" -type f -exec stat -c "%X:%n" "{}" \; | sort | while read -
do
echo "Looking at ${part}"
TMP_ATIME=$(echo "${part}" | cut -d: -f1)
TMP_STATS=$(echo "${part}" | cut -d: -f2)
TMP_STATS=$(echo "${part}" | cut -d: -f2-)
TMP_CFILE=$(echo "${TMP_STATS}" | sed -e "s/\\.${BUCKET}\\.stat/${BUCKET}/")

if [ "$(stat -c %X "${TMP_STATS}")" -eq "${TMP_ATIME}" ]; then

@@ -38,21 +38,28 @@ source test-utils.sh
FAKE_FREE_DISK_SIZE=200
ENSURE_DISKFREE_SIZE=10

# set up client-side encryption keys
head -c 32 < /dev/urandom > /tmp/ssekey.bin
base64 < /tmp/ssekey.bin > /tmp/ssekey
openssl md5 -binary < /tmp/ssekey.bin | base64 > /tmp/ssekeymd5
chmod 600 /tmp/ssekey /tmp/ssekey.bin /tmp/ssekeymd5

export CACHE_DIR
export ENSURE_DISKFREE_SIZE
if [ -n "${ALL_TESTS}" ]; then
FLAGS=(
"use_cache=${CACHE_DIR} -o ensure_diskfree=${ENSURE_DISKFREE_SIZE} -o fake_diskfree=${FAKE_FREE_DISK_SIZE}"
"use_cache=${CACHE_DIR} -o ensure_diskfree=${ENSURE_DISKFREE_SIZE} -o fake_diskfree=${FAKE_FREE_DISK_SIZE} -o use_xattr -o update_parent_dir_stat"
enable_content_md5
enable_noobj_cache
disable_noobj_cache
"max_stat_cache_size=100"
nocopyapi
nomultipart
notsup_compat_dir
sigv2
sigv4
"singlepart_copy_limit=10" # limit size to exercise multipart code paths
#use_sse # TODO: S3Proxy does not support SSE
#use_sse=custom:/tmp/ssekey # TODO: S3Proxy does not support SSE
"use_cache=${CACHE_DIR} -o ensure_diskfree=${ENSURE_DISKFREE_SIZE} -o fake_diskfree=${FAKE_FREE_DISK_SIZE} -o streamupload"
)
else
FLAGS=(

@@ -71,6 +71,16 @@ else
fi
export SED_BUFFER_FLAG="--unbuffered"

# [NOTE]
# Specifying cache disable option depending on stat(coreutils) version
# TODO: investigate why this is necessary #2327
#
if stat --cached=never / >/dev/null 2>&1; then
STAT_BIN=(stat --cached=never)
else
STAT_BIN=(stat)
fi

function get_xattr() {
if [ "$(uname)" = "Darwin" ]; then
xattr -p "$1" "$2"
@@ -95,11 +105,19 @@ function del_xattr() {
fi
}

function get_inode() {
if [ "$(uname)" = "Darwin" ]; then
"${STAT_BIN[@]}" -f "%i" "$1"
else
"${STAT_BIN[@]}" --format "%i" "$1"
fi
}

function get_size() {
if [ "$(uname)" = "Darwin" ]; then
stat -f "%z" "$1"
"${STAT_BIN[@]}" -f "%z" "$1"
else
stat -c %s "$1"
"${STAT_BIN[@]}" --format "%s" "$1"
fi
}

@@ -137,22 +155,6 @@ function mk_test_file {
echo "Could not create file ${TEST_TEXT_FILE}, it does not exist"
exit 1
fi

# wait & check
local BASE_TEXT_LENGTH; BASE_TEXT_LENGTH=$(echo "${TEXT}" | wc -c | awk '{print $1}')
local TRY_COUNT=10
while true; do
local MK_TEXT_LENGTH
MK_TEXT_LENGTH=$(wc -c "${TEST_TEXT_FILE}" | awk '{print $1}')
if [ "${BASE_TEXT_LENGTH}" -eq "${MK_TEXT_LENGTH}" ]; then
break
fi
local TRY_COUNT=$((TRY_COUNT - 1))
if [ "${TRY_COUNT}" -le 0 ]; then
echo "Could not create file ${TEST_TEXT_FILE}, that file size is something wrong"
fi
sleep 1
done
}

function rm_test_file {
@@ -254,12 +256,23 @@ function run_suite {
ALT_TEST_TEXT_FILE="test-s3fs-ALT-${RANDOM}.txt"
# shellcheck disable=SC2034
BIG_FILE="big-file-s3fs-${RANDOM}.txt"
"${t}" "${key_prefix}" && rc=$? || rc=$?
if [ $rc = 0 ]; then
# The following sequence runs tests in a subshell to allow continuation
# on test failure, but still allowing errexit to be in effect during
# the test.
#
# See:
# https://groups.google.com/d/msg/gnu.bash.bug/NCK_0GmIv2M/dkeZ9MFhPOIJ
# Other ways of trying to capture the return value will also disable
# errexit in the function due to bash... compliance with POSIX?
set +o errexit
(set -o errexit; $t $key_prefix)
# shellcheck disable=SC2181
if [ $? == 0 ]; then
report_pass "${t}"
else
report_fail "${t}"
fi
set -o errexit
done
cd "${orig_dir}"
clean_run_dir
@@ -284,44 +297,55 @@ function run_suite {
}

function get_ctime() {
# ex: "1657504903.019784214"
if [ "$(uname)" = "Darwin" ]; then
stat -f "%c" "$1"
"${STAT_BIN[@]}" -f "%Fc" "$1"
else
stat -c "%Z" "$1"
"${STAT_BIN[@]}" --format "%.9Z" "$1"
fi
}

function get_mtime() {
# ex: "1657504903.019784214"
if [ "$(uname)" = "Darwin" ]; then
stat -f "%m" "$1"
"${STAT_BIN[@]}" -f "%Fm" "$1"
else
stat -c "%Y" "$1"
"${STAT_BIN[@]}" --format "%.9Y" "$1"
fi
}

function get_atime() {
# ex: "1657504903.019784214"
if [ "$(uname)" = "Darwin" ]; then
stat -f "%a" "$1"
"${STAT_BIN[@]}" -f "%Fa" "$1"
else
stat -c "%X" "$1"
"${STAT_BIN[@]}" --format "%.9X" "$1"
fi
}

function get_permissions() {
if [ "$(uname)" = "Darwin" ]; then
stat -f "%p" "$1"
"${STAT_BIN[@]}" -f "%p" "$1"
else
stat -c "%a" "$1"
"${STAT_BIN[@]}" --format "%a" "$1"
fi
}

function get_user_and_group() {
if [ "$(uname)" = "Darwin" ]; then
stat -f "%u:%g" "$1"
else
"${STAT_BIN[@]}" --format "%u:%g" "$1"
fi
}

function check_content_type() {
local INFO_STR
INFO_STR=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "$1")
if [[ "${INFO_STR}" != *"$2"* ]]
INFO_STR=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "$1" | jq -r .ContentType)
if [ "${INFO_STR}" != "$2" ]
then
echo "moved file content-type is not as expected expected:$2 got:${INFO_STR}"
exit 1
echo "Expected Content-Type: $2 but got: ${INFO_STR}"
return 1
fi
}

@@ -336,8 +360,21 @@ function aws_cli() {
if [ -n "${S3FS_PROFILE}" ]; then
FLAGS="--profile ${S3FS_PROFILE}"
fi

if [ "$1" = "s3" ] && [ "$2" != "ls" ] && [ "$2" != "mb" ]; then
if s3fs_args | grep -q use_sse=custom; then
FLAGS="${FLAGS} --sse-c AES256 --sse-c-key fileb:///tmp/ssekey.bin"
fi
elif [ "$1" = "s3api" ] && [ "$2" != "head-bucket" ]; then
if s3fs_args | grep -q use_sse=custom; then
FLAGS="${FLAGS} --sse-customer-algorithm AES256 --sse-customer-key $(cat /tmp/ssekey) --sse-customer-key-md5 $(cat /tmp/ssekeymd5)"
fi
fi

# [NOTE]
# AWS_EC2_METADATA_DISABLED for preventing the metadata service(to 169.254.169.254).
# shellcheck disable=SC2086,SC2068
aws $@ --endpoint-url "${S3_URL}" --ca-bundle /tmp/keystore.pem ${FLAGS}
AWS_EC2_METADATA_DISABLED=true aws $@ --endpoint-url "${S3_URL}" --ca-bundle /tmp/keystore.pem ${FLAGS}
}

function wait_for_port() {
@@ -359,12 +396,43 @@ function make_random_string() {
else
local END_POS=8
fi

"${BASE64_BIN}" --wrap=0 < /dev/urandom | tr -d /+ | head -c "${END_POS}"
if [ "$(uname)" = "Darwin" ]; then
local BASE64_OPT="--break=0"
else
local BASE64_OPT="--wrap=0"
fi
"${BASE64_BIN}" "${BASE64_OPT}" < /dev/urandom 2>/dev/null | tr -d /+ | head -c "${END_POS}"

return 0
}

function s3fs_args() {
if [ "$(uname)" = "Darwin" ]; then
ps -o args -p "${S3FS_PID}" | tail -n +2
else
ps -o args -p "${S3FS_PID}" --no-headers
fi
}

#
# $1: sleep seconds
# $2: OS type(ex. 'Darwin', unset(means all os type))
#
# [NOTE] macos fuse-t
# macos fuse-t mounts over NFS, and the mtime/ctime/atime attribute
# values are in seconds(not m/u/n-sec).
# Therefore, unlike tests on other OSs, we have to wait at least 1
# second.
# This function is called primarily for this purpose.
#
function wait_ostype() {
if [ -z "$2" ] || uname | grep -q "$2"; then
if [ -n "$1" ] && ! (echo "$1" | grep -q '[^0-9]'); then
sleep "$1"
fi
fi
}

#
# Local variables:
# tab-width: 4

test/truncate_read_file.cc (new file, 82 lines)
@@ -0,0 +1,82 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright(C) 2021 Andrew Gaul <andrew@gaul.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <cstdio>
#include <cstdlib>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

// [NOTE]
// This is a program used for file size inspection.
// File size checking should be done by the caller of this program.
// This program truncates the file and reads the file in another process
// between truncate and flush(close file).
//
int main(int argc, char *argv[])
{
if(argc != 3){
fprintf(stderr, "[ERROR] Wrong parameters\n");
fprintf(stdout, "[Usage] truncate_read_file <file path> <truncate size(bytes)>\n");
exit(EXIT_FAILURE);
}

const char* filepath = argv[1];
off_t size = static_cast<off_t>(strtoull(argv[2], nullptr, 10));
int fd;

// open file
if(-1 == (fd = open(filepath, O_RDWR))){
fprintf(stderr, "[ERROR] Could not open file(%s)\n", filepath);
exit(EXIT_FAILURE);
}

// truncate
if(0 != ftruncate(fd, size)){
fprintf(stderr, "[ERROR] Could not truncate file(%s) to %lld byte.\n", filepath, (long long)size);
close(fd);
exit(EXIT_FAILURE);
}

// run sub-process for reading file(cat)
char szCommand[1024];
snprintf(szCommand, sizeof(szCommand), "cat %s >/dev/null 2>&1", filepath);
szCommand[sizeof(szCommand) - 1] = '\0'; // for safety
if(0 != system(szCommand)){
fprintf(stderr, "[ERROR] Failed to run sub-process(cat).\n");
close(fd);
exit(EXIT_FAILURE);
}

// close file(flush)
close(fd);

exit(EXIT_SUCCESS);
}

/*
 * Local variables:
 * tab-width: 4
 * c-basic-offset: 4
 * End:
 * vim600: expandtab sw=4 ts=4 fdm=marker
 * vim<600: expandtab sw=4 ts=4
 */
@@ -18,19 +18,21 @@
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <climits>
#include <string>
#include <list>
#include <memory>
#include <string>
#include <vector>

#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>

//---------------------------------------------------------
// Structures and Typedefs
@@ -41,38 +43,32 @@ struct write_block_part
off_t size;
};

typedef std::list<write_block_part> wbpart_list_t;
typedef std::list<std::string> strlist_t;
typedef std::vector<write_block_part> wbpart_list_t;
typedef std::list<std::string> strlist_t;

//---------------------------------------------------------
// Const
//---------------------------------------------------------
const char usage_string[] = "Usage : \"write_multiblock -f <file path> -p <start offset:size>\" (allows -f and -p multiple times.)";
static constexpr char usage_string[] = "Usage : \"write_multiblock -f <file path> -p <start offset:size>\" (allows -f and -p multiple times.)";

//---------------------------------------------------------
// Utility functions
//---------------------------------------------------------
static unsigned char* create_random_data(off_t size)
static std::unique_ptr<unsigned char[]> create_random_data(off_t size)
{
int fd;
if(-1 == (fd = open("/dev/urandom", O_RDONLY))){
std::cerr << "[ERROR] Could not open /dev/urandom" << std::endl;
return NULL;
return nullptr;
}

unsigned char* pbuff;
if(NULL == (pbuff = reinterpret_cast<unsigned char*>(malloc(size)))){
std::cerr << "[ERROR] Could not allocate memory." << std::endl;
close(fd);
return NULL;
}
std::unique_ptr<unsigned char[]> pbuff(new unsigned char[size]);
for(ssize_t readpos = 0, readcnt = 0; readpos < size; readpos += readcnt){
if(-1 == (readcnt = read(fd, &(pbuff[readpos]), static_cast<size_t>(size - readpos)))){
if(EAGAIN != errno && EWOULDBLOCK != errno && EINTR != errno){
std::cerr << "[ERROR] Failed reading from /dev/urandom with errno: " << errno << std::endl;
free(pbuff);
close(fd);
return NULL;
return nullptr;
}
readcnt = 0;
}
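The switch to std::unique_ptr<unsigned char[]> above is what lets the explicit free(pbuff) calls disappear from every error path: ownership is released automatically on any return. A minimal sketch of the pattern (illustrative function only, not part of the diff's API):

    #include <memory>

    static std::unique_ptr<unsigned char[]> make_buffer(size_t size, bool fail)
    {
        std::unique_ptr<unsigned char[]> buf(new unsigned char[size]);
        if(fail){
            return nullptr;    // buf's memory is freed here automatically
        }
        return buf;            // ownership moves to the caller; nothing to free manually
    }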
@@ -87,7 +83,7 @@ static off_t cvt_string_to_number(const char* pstr)
}

errno = 0;
char* ptemp = NULL;
char* ptemp = nullptr;
long long result = strtoll(pstr, &ptemp, 10);

if(!ptemp || ptemp == pstr || *ptemp != '\0'){
@@ -170,7 +166,7 @@ static bool parse_arguments(int argc, char** argv, strlist_t& files, wbpart_list
while(-1 != (opt = getopt(argc, argv, "f:p:"))){
switch(opt){
case 'f':
files.push_back(std::string(optarg));
files.emplace_back(optarg);
break;
case 'p':
if(!parse_write_blocks(optarg, wbparts, max_size)){
@@ -205,10 +201,7 @@ int main(int argc, char** argv)
}

// make data and buffer
unsigned char* pData;
if(NULL == (pData = create_random_data(max_size))){
exit(EXIT_FAILURE);
}
std::unique_ptr<unsigned char[]> pData = create_random_data(max_size);

for(strlist_t::const_iterator fiter = files.begin(); fiter != files.end(); ++fiter){
// open/create file
@@ -216,19 +209,16 @@ int main(int argc, char** argv)
struct stat st;
if(0 == stat(fiter->c_str(), &st)){
if(!S_ISREG(st.st_mode)){
std::cerr << "[ERROR] File " << fiter->c_str() << " is existed, but it is not regular file." << std::endl;
free(pData);
std::cerr << "[ERROR] File " << *fiter << " is existed, but it is not regular file." << std::endl;
exit(EXIT_FAILURE);
}
if(-1 == (fd = open(fiter->c_str(), O_WRONLY))){
std::cerr << "[ERROR] Could not open " << fiter->c_str() << std::endl;
free(pData);
std::cerr << "[ERROR] Could not open " << *fiter << std::endl;
exit(EXIT_FAILURE);
}
}else{
if(-1 == (fd = open(fiter->c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0644))){
std::cerr << "[ERROR] Could not create " << fiter->c_str() << std::endl;
free(pData);
std::cerr << "[ERROR] Could not create " << *fiter << std::endl;
exit(EXIT_FAILURE);
}
}
@@ -239,9 +229,8 @@ int main(int argc, char** argv)
for(ssize_t writepos = 0, writecnt = 0; writepos < piter->size; writepos += writecnt){
if(-1 == (writecnt = pwrite(fd, &(pData[writepos]), static_cast<size_t>(piter->size - writepos), (piter->start + writepos)))){
if(EAGAIN != errno && EWOULDBLOCK != errno && EINTR != errno){
std::cerr << "[ERROR] Failed writing to " << fiter->c_str() << " by errno : " << errno << std::endl;
std::cerr << "[ERROR] Failed writing to " << *fiter << " by errno : " << errno << std::endl;
close(fd);
free(pData);
exit(EXIT_FAILURE);
}
writecnt = 0;
@@ -251,7 +240,6 @@ int main(int argc, char** argv)
// close file
close(fd);
}
free(pData);

exit(EXIT_SUCCESS);
}