Use curl instead of AWS CLI (#2689)

The AWS CLI starts up significantly more slowly, which inflates
integration test times.  curl has some limitations, e.g., no SSE support.
This commit is contained in:
Andrew Gaul
2025-08-28 08:36:07 +09:00
committed by GitHub
parent e8b5a4109a
commit 87d7a5822e
6 changed files with 88 additions and 84 deletions

View File

@ -140,11 +140,6 @@ jobs:
if brew list | grep -q ${s3fs_brew_pkg}; then if brew outdated | grep -q ${s3fs_brew_pkg}; then HOMEBREW_NO_AUTO_UPDATE=1 brew upgrade ${s3fs_brew_pkg}; fi; else HOMEBREW_NO_AUTO_UPDATE=1 brew install ${s3fs_brew_pkg}; fi
done
- name: Install awscli2
run: |
curl "https://awscli.amazonaws.com/AWSCLIV2.pkg" -o /tmp/AWSCLIV2.pkg
sudo installer -pkg /tmp/AWSCLIV2.pkg -target /
- name: Build
run: |
./autogen.sh

View File

@ -57,11 +57,7 @@ CONTAINER_OSNAME=$(echo "${CONTAINER_FULLNAME}" | cut -d: -f1)
# shellcheck disable=SC2034
CONTAINER_OSVERSION=$(echo "${CONTAINER_FULLNAME}" | cut -d: -f2)
#-----------------------------------------------------------
# Common variables for awscli2
#-----------------------------------------------------------
AWSCLI_URI="https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m).zip"
AWSCLI_ZIP_FILE="awscliv2.zip"
CURL_DIRECT_URL="https://github.com/moparisthebest/static-curl/releases/latest/download/curl-$(uname -m | sed -e s/x86_64/amd64/)"
#-----------------------------------------------------------
# Parameters for configure(set environments)
@ -79,7 +75,7 @@ CONFIGURE_OPTIONS="--prefix=/usr --with-openssl"
#
PACKAGE_ENABLE_REPO_OPTIONS=""
PACKAGE_INSTALL_ADDITIONAL_OPTIONS=""
AWSCLI_DIRECT_INSTALL=1
CURL_DIRECT_INSTALL=0
if [ "${CONTAINER_FULLNAME}" = "ubuntu:25.04" ] ||
[ "${CONTAINER_FULLNAME}" = "ubuntu:24.04" ]; then
@ -107,7 +103,6 @@ if [ "${CONTAINER_FULLNAME}" = "ubuntu:25.04" ] ||
openjdk-21-jre-headless
pkg-config
python3
unzip
)
elif [ "${CONTAINER_FULLNAME}" = "ubuntu:22.04" ]; then
@ -135,9 +130,10 @@ elif [ "${CONTAINER_FULLNAME}" = "ubuntu:22.04" ]; then
openjdk-21-jre-headless
pkg-config
python3
unzip
)
CURL_DIRECT_INSTALL=1
elif [ "${CONTAINER_FULLNAME}" = "debian:trixie" ]; then
PACKAGE_MANAGER_BIN="apt-get"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
@ -164,7 +160,6 @@ elif [ "${CONTAINER_FULLNAME}" = "debian:trixie" ]; then
pkg-config
procps
python3
unzip
)
elif [ "${CONTAINER_FULLNAME}" = "debian:bookworm" ] ||
@ -194,9 +189,10 @@ elif [ "${CONTAINER_FULLNAME}" = "debian:bookworm" ] ||
pkg-config
procps
python3
unzip
)
CURL_DIRECT_INSTALL=1
elif [ "${CONTAINER_FULLNAME}" = "rockylinux/rockylinux:10" ]; then
PACKAGE_MANAGER_BIN="dnf"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
@ -233,7 +229,6 @@ elif [ "${CONTAINER_FULLNAME}" = "rockylinux/rockylinux:10" ]; then
perl-Test-Harness
procps
python3
unzip
xz
https://dl.fedoraproject.org/pub/epel/epel-release-latest-10.noarch.rpm
)
@ -274,11 +269,12 @@ elif [ "${CONTAINER_FULLNAME}" = "rockylinux:9" ]; then
perl-Test-Harness
procps
python3
unzip
xz
https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm
)
CURL_DIRECT_INSTALL=1
elif [ "${CONTAINER_FULLNAME}" = "rockylinux:8" ]; then
PACKAGE_MANAGER_BIN="dnf"
PACKAGE_UPDATE_OPTIONS="update -y -qq"
@ -307,9 +303,10 @@ elif [ "${CONTAINER_FULLNAME}" = "rockylinux:8" ]; then
openssl-devel
perl-Test-Harness
python3
unzip
)
CURL_DIRECT_INSTALL=1
elif [ "${CONTAINER_FULLNAME}" = "fedora:42" ] ||
[ "${CONTAINER_FULLNAME}" = "fedora:41" ]; then
PACKAGE_MANAGER_BIN="dnf"
@ -345,7 +342,6 @@ elif [ "${CONTAINER_FULLNAME}" = "fedora:42" ] ||
perl-Test-Harness
procps
ShellCheck
unzip
)
elif [ "${CONTAINER_FULLNAME}" = "opensuse/leap:15" ]; then
@ -369,7 +365,6 @@ elif [ "${CONTAINER_FULLNAME}" = "opensuse/leap:15" ]; then
openssl
openssl-devel
procps
unzip
)
elif [ "${CONTAINER_FULLNAME}" = "alpine:3.22" ]; then
@ -402,8 +397,6 @@ elif [ "${CONTAINER_FULLNAME}" = "alpine:3.22" ]; then
sed
)
AWSCLI_DIRECT_INSTALL=0
else
echo "No container configured for: ${CONTAINER_FULLNAME}"
exit 1
@ -427,17 +420,22 @@ echo "${PRGNAME} [INFO] Install packages."
# Check Java version
java -version
#
# Install awscli
#
if [ "${AWSCLI_DIRECT_INSTALL}" -eq 1 ]; then
echo "${PRGNAME} [INFO] Install awscli2 package."
# Install newer curl for older distributions
if [ "${CURL_DIRECT_INSTALL}" -eq 1 ]; then
echo "${PRGNAME} [INFO] Install newer curl package."
curl "${AWSCLI_URI}" -o "/tmp/${AWSCLI_ZIP_FILE}"
unzip "/tmp/${AWSCLI_ZIP_FILE}" -d /tmp
/tmp/aws/install
curl --fail --location --silent --output "/usr/local/bin/curl" "${CURL_DIRECT_URL}"
chmod +x "/usr/local/bin/curl"
# Rocky Linux 8 and 9 have a different certificate path
if [ ! -f /etc/ssl/certs/ca-certificates.crt ]; then
ln -s /etc/pki/tls/certs/ca-bundle.crt /etc/ssl/certs/ca-certificates.crt
fi
fi
# Check curl version
curl --version
#-----------------------------------------------------------
# Set environment for configure
#-----------------------------------------------------------

View File

@ -84,6 +84,7 @@ fi
export TEST_BUCKET_1
export S3_URL
export S3_ENDPOINT
export S3PROXY_CACERT_FILE
TEST_SCRIPT_DIR=$(pwd)
export TEST_SCRIPT_DIR
export TEST_BUCKET_MOUNT_POINT_1=${TEST_BUCKET_1}

View File

@ -33,7 +33,7 @@ function test_create_empty_file {
check_file_size "${TEST_TEXT_FILE}" 0
aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "${OBJECT_NAME}"
s3_head "${TEST_BUCKET_1}/${OBJECT_NAME}"
rm_test_file
}
@ -381,7 +381,7 @@ function test_remove_nonempty_directory {
function test_external_directory_creation {
describe "Test external directory creation ..."
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/directory/"${TEST_TEXT_FILE}"
echo "data" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "data" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}"
# shellcheck disable=SC2010
ls | grep -q directory
stat directory >/dev/null 2>&1
@ -406,7 +406,7 @@ function test_external_modification {
sleep 1
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo "new new" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "new new" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}"
cmp "${TEST_TEXT_FILE}" <(echo "new new")
rm -f "${TEST_TEXT_FILE}"
@ -425,7 +425,7 @@ function test_external_creation {
# If noobj_cache is enabled, we cannot be sure that it is registered in that cache.
# That's because an error will occur if the upload by aws cli takes more than 1 second.
#
echo "data" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "data" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}"
wait_ostype 1
@ -437,7 +437,7 @@ function test_external_creation {
function test_read_external_object() {
describe "create objects via aws CLI and read via s3fs ..."
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo "test" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "test" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}"
cmp "${TEST_TEXT_FILE}" <(echo "test")
rm -f "${TEST_TEXT_FILE}"
}
@ -448,7 +448,7 @@ function test_read_external_dir_object() {
local SUB_DIR_TEST_FILE; SUB_DIR_TEST_FILE="${SUB_DIR_NAME}/${TEST_TEXT_FILE}"
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${SUB_DIR_TEST_FILE}"
echo "test" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "test" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}"
if stat "${SUB_DIR_NAME}" | grep -q '1969-12-31[[:space:]]23:59:59[.]000000000'; then
echo "sub directory a/c/m time is underflow(-1)."
@ -476,7 +476,7 @@ function test_update_metadata_external_small_object() {
# chmod
#
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_CHMOD_FILE}"
echo "${TEST_INPUT}" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "${TEST_INPUT}" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}"
chmod +x "${TEST_CHMOD_FILE}"
cmp "${TEST_CHMOD_FILE}" <(echo "${TEST_INPUT}")
@ -484,7 +484,7 @@ function test_update_metadata_external_small_object() {
# chown
#
OBJECT_NAME=$(basename "${PWD}")/"${TEST_CHOWN_FILE}"
echo "${TEST_INPUT}" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "${TEST_INPUT}" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}"
chown "${UID}" "${TEST_CHOWN_FILE}"
cmp "${TEST_CHOWN_FILE}" <(echo "${TEST_INPUT}")
@ -492,7 +492,7 @@ function test_update_metadata_external_small_object() {
# utimens
#
OBJECT_NAME=$(basename "${PWD}")/"${TEST_UTIMENS_FILE}"
echo "${TEST_INPUT}" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "${TEST_INPUT}" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}"
touch "${TEST_UTIMENS_FILE}"
cmp "${TEST_UTIMENS_FILE}" <(echo "${TEST_INPUT}")
@ -500,7 +500,7 @@ function test_update_metadata_external_small_object() {
# set xattr
#
OBJECT_NAME=$(basename "${PWD}")/"${TEST_SETXATTR_FILE}"
echo "${TEST_INPUT}" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo "${TEST_INPUT}" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}"
set_xattr key value "${TEST_SETXATTR_FILE}"
cmp "${TEST_SETXATTR_FILE}" <(echo "${TEST_INPUT}")
XATTR_VALUE=$(get_xattr key "${TEST_SETXATTR_FILE}")
@ -526,7 +526,7 @@ function test_update_metadata_external_small_object() {
#
if ! uname | grep -q Darwin; then
OBJECT_NAME=$(basename "${PWD}")/"${TEST_RMXATTR_FILE}"
echo "${TEST_INPUT}" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}" --metadata xattr=%7B%22key%22%3A%22dmFsdWU%3D%22%7D
echo "${TEST_INPUT}" | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-xattr: %7B%22key%22%3A%22dmFsdWU%3D%22%7D"
del_xattr key "${TEST_RMXATTR_FILE}"
cmp "${TEST_RMXATTR_FILE}" <(echo "${TEST_INPUT}")
if find_xattr key "${TEST_RMXATTR_FILE}"; then
@ -561,7 +561,7 @@ function test_update_metadata_external_large_object() {
# chmod
#
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_CHMOD_FILE}"
aws_cli s3 cp "${TEMP_DIR}/${BIG_FILE}" "s3://${TEST_BUCKET_1}/${OBJECT_NAME}" --no-progress
s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" < "${TEMP_DIR}/${BIG_FILE}"
chmod +x "${TEST_CHMOD_FILE}"
cmp "${TEST_CHMOD_FILE}" "${TEMP_DIR}/${BIG_FILE}"
@ -569,7 +569,7 @@ function test_update_metadata_external_large_object() {
# chown
#
OBJECT_NAME=$(basename "${PWD}")/"${TEST_CHOWN_FILE}"
aws_cli s3 cp "${TEMP_DIR}/${BIG_FILE}" "s3://${TEST_BUCKET_1}/${OBJECT_NAME}" --no-progress
s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" < "${TEMP_DIR}/${BIG_FILE}"
chown "${UID}" "${TEST_CHOWN_FILE}"
cmp "${TEST_CHOWN_FILE}" "${TEMP_DIR}/${BIG_FILE}"
@ -577,7 +577,7 @@ function test_update_metadata_external_large_object() {
# utimens
#
OBJECT_NAME=$(basename "${PWD}")/"${TEST_UTIMENS_FILE}"
aws_cli s3 cp "${TEMP_DIR}/${BIG_FILE}" "s3://${TEST_BUCKET_1}/${OBJECT_NAME}" --no-progress
s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" < "${TEMP_DIR}/${BIG_FILE}"
touch "${TEST_UTIMENS_FILE}"
cmp "${TEST_UTIMENS_FILE}" "${TEMP_DIR}/${BIG_FILE}"
@ -585,7 +585,7 @@ function test_update_metadata_external_large_object() {
# set xattr
#
OBJECT_NAME=$(basename "${PWD}")/"${TEST_SETXATTR_FILE}"
aws_cli s3 cp "${TEMP_DIR}/${BIG_FILE}" "s3://${TEST_BUCKET_1}/${OBJECT_NAME}" --no-progress
s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" < "${TEMP_DIR}/${BIG_FILE}"
set_xattr key value "${TEST_SETXATTR_FILE}"
cmp "${TEST_SETXATTR_FILE}" "${TEMP_DIR}/${BIG_FILE}"
XATTR_VALUE=$(get_xattr key "${TEST_SETXATTR_FILE}")
@ -611,7 +611,7 @@ function test_update_metadata_external_large_object() {
#
if ! uname | grep -q Darwin; then
OBJECT_NAME=$(basename "${PWD}")/"${TEST_RMXATTR_FILE}"
aws_cli s3 cp "${TEMP_DIR}/${BIG_FILE}" "s3://${TEST_BUCKET_1}/${OBJECT_NAME}" --no-progress --metadata xattr=%7B%22key%22%3A%22dmFsdWU%3D%22%7D
s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-xattr: %7B%22key%22%3A%22dmFsdWU%3D%22%7D" < "${TEMP_DIR}/${BIG_FILE}"
del_xattr key "${TEST_RMXATTR_FILE}"
cmp "${TEST_RMXATTR_FILE}" "${TEMP_DIR}/${BIG_FILE}"
if find_xattr key "${TEST_RMXATTR_FILE}"; then
@ -954,7 +954,7 @@ function test_update_time_chmod() {
local t0=1000000000 # 9 September 2001
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo data | aws_cli s3 cp --metadata="atime=${t0},ctime=${t0},mtime=${t0}" - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo data | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}"
local base_atime; base_atime=$(get_atime "${TEST_TEXT_FILE}")
local base_ctime; base_ctime=$(get_ctime "${TEST_TEXT_FILE}")
local base_mtime; base_mtime=$(get_mtime "${TEST_TEXT_FILE}")
@ -981,7 +981,7 @@ function test_update_time_chown() {
#
local t0=1000000000 # 9 September 2001
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo data | aws_cli s3 cp --metadata="atime=${t0},ctime=${t0},mtime=${t0}" - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo data | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-atime=${t0}" --header "x-amz-meta-ctime=${t0}" --header "x-amz-meta-mtime=${t0}"
local base_atime; base_atime=$(get_atime "${TEST_TEXT_FILE}")
local base_ctime; base_ctime=$(get_ctime "${TEST_TEXT_FILE}")
local base_mtime; base_mtime=$(get_mtime "${TEST_TEXT_FILE}")
@ -1024,7 +1024,7 @@ function test_update_time_xattr() {
local t0=1000000000 # 9 September 2001
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo data | aws_cli s3 cp --metadata="atime=${t0},ctime=${t0},mtime=${t0}" - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo data | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}"
local base_atime; base_atime=$(get_atime "${TEST_TEXT_FILE}")
local base_ctime; base_ctime=$(get_ctime "${TEST_TEXT_FILE}")
local base_mtime; base_mtime=$(get_mtime "${TEST_TEXT_FILE}")
@ -1061,7 +1061,7 @@ function test_update_time_touch() {
local t0=1000000000 # 9 September 2001
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo data | aws_cli s3 cp --metadata="atime=${t0},ctime=${t0},mtime=${t0}" - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo data | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}"
local base_atime; base_atime=$(get_atime "${TEST_TEXT_FILE}")
local base_ctime; base_ctime=$(get_ctime "${TEST_TEXT_FILE}")
local base_mtime; base_mtime=$(get_mtime "${TEST_TEXT_FILE}")
@ -1085,7 +1085,7 @@ function test_update_time_touch_a() {
local t0=1000000000 # 9 September 2001
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo data | aws_cli s3 cp --metadata="atime=${t0},ctime=${t0},mtime=${t0}" - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo data | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}"
local base_atime; base_atime=$(get_atime "${TEST_TEXT_FILE}")
local base_ctime; base_ctime=$(get_ctime "${TEST_TEXT_FILE}")
local base_mtime; base_mtime=$(get_mtime "${TEST_TEXT_FILE}")
@ -1121,7 +1121,7 @@ function test_update_time_append() {
local t0=1000000000 # 9 September 2001
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo data | aws_cli s3 cp --metadata="atime=${t0},ctime=${t0},mtime=${t0}" - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo data | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}"
local base_atime; base_atime=$(get_atime "${TEST_TEXT_FILE}")
local base_ctime; base_ctime=$(get_ctime "${TEST_TEXT_FILE}")
local base_mtime; base_mtime=$(get_mtime "${TEST_TEXT_FILE}")
@ -1145,7 +1145,7 @@ function test_update_time_cp_p() {
local t0=1000000000 # 9 September 2001
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo data | aws_cli s3 cp --metadata="atime=${t0},ctime=${t0},mtime=${t0}" - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo data | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}"
local base_atime; base_atime=$(get_atime "${TEST_TEXT_FILE}")
local base_ctime; base_ctime=$(get_ctime "${TEST_TEXT_FILE}")
local base_mtime; base_mtime=$(get_mtime "${TEST_TEXT_FILE}")
@ -1171,7 +1171,7 @@ function test_update_time_mv() {
local t0=1000000000 # 9 September 2001
local OBJECT_NAME; OBJECT_NAME=$(basename "${PWD}")/"${TEST_TEXT_FILE}"
echo data | aws_cli s3 cp --metadata="atime=${t0},ctime=${t0},mtime=${t0}" - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
echo data | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME}" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}"
local base_atime; base_atime=$(get_atime "${TEST_TEXT_FILE}")
local base_ctime; base_ctime=$(get_ctime "${TEST_TEXT_FILE}")
local base_mtime; base_mtime=$(get_mtime "${TEST_TEXT_FILE}")
@ -1205,7 +1205,7 @@ function test_update_directory_time_chmod() {
#
local t0=1000000000 # 9 September 2001
local DIRECTORY_NAME; DIRECTORY_NAME=$(basename "${PWD}")/"${TEST_DIR}"
aws_cli s3api put-object --content-type="application/x-directory" --metadata="atime=${t0},ctime=${t0},mtime=${t0}" --bucket "${TEST_BUCKET_1}" --key "$DIRECTORY_NAME/"
s3_cp "${TEST_BUCKET_1}/${DIRECTORY_NAME}/" --header "Content-Type: application/x-directory" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}" < /dev/null
local base_atime; base_atime=$(get_atime "${TEST_DIR}")
local base_ctime; base_ctime=$(get_ctime "${TEST_DIR}")
@ -1231,7 +1231,7 @@ function test_update_directory_time_chown {
local t0=1000000000 # 9 September 2001
local DIRECTORY_NAME; DIRECTORY_NAME=$(basename "${PWD}")/"${TEST_DIR}"
aws_cli s3api put-object --content-type="application/x-directory" --metadata="atime=${t0},ctime=${t0},mtime=${t0}" --bucket "${TEST_BUCKET_1}" --key "$DIRECTORY_NAME/"
s3_cp "${TEST_BUCKET_1}/${DIRECTORY_NAME}/" --header "Content-Type: application/x-directory" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}" < /dev/null
local base_atime; base_atime=$(get_atime "${TEST_DIR}")
local base_ctime; base_ctime=$(get_ctime "${TEST_DIR}")
@ -1267,7 +1267,7 @@ function test_update_directory_time_set_xattr {
local t0=1000000000 # 9 September 2001
local DIRECTORY_NAME; DIRECTORY_NAME=$(basename "${PWD}")/"${TEST_DIR}"
aws_cli s3api put-object --content-type="application/x-directory" --metadata="atime=${t0},ctime=${t0},mtime=${t0}" --bucket "${TEST_BUCKET_1}" --key "$DIRECTORY_NAME/"
s3_cp "${TEST_BUCKET_1}/${DIRECTORY_NAME}/" --header "Content-Type: application/x-directory" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}" < /dev/null
local base_atime; base_atime=$(get_atime "${TEST_DIR}")
local base_ctime; base_ctime=$(get_ctime "${TEST_DIR}")
@ -1303,7 +1303,7 @@ function test_update_directory_time_touch {
local t0=1000000000 # 9 September 2001
local DIRECTORY_NAME; DIRECTORY_NAME=$(basename "${PWD}")/"${TEST_DIR}"
aws_cli s3api put-object --content-type="application/x-directory" --metadata="atime=${t0},ctime=${t0},mtime=${t0}" --bucket "${TEST_BUCKET_1}" --key "$DIRECTORY_NAME/"
s3_cp "${TEST_BUCKET_1}/${DIRECTORY_NAME}/" --header "Content-Type: application/x-directory" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}" < /dev/null
local base_atime; base_atime=$(get_atime "${TEST_DIR}")
local base_ctime; base_ctime=$(get_ctime "${TEST_DIR}")
@ -1328,7 +1328,7 @@ function test_update_directory_time_touch_a {
local t0=1000000000 # 9 September 2001
local DIRECTORY_NAME; DIRECTORY_NAME=$(basename "${PWD}")/"${TEST_DIR}"
aws_cli s3api put-object --content-type="application/x-directory" --metadata="atime=${t0},ctime=${t0},mtime=${t0}" --bucket "${TEST_BUCKET_1}" --key "$DIRECTORY_NAME/"
s3_cp "${TEST_BUCKET_1}/${DIRECTORY_NAME}/" --header "Content-Type: application/x-directory" --header "x-amz-meta-atime: ${t0}" --header "x-amz-meta-ctime: ${t0}" --header "x-amz-meta-mtime: ${t0}" < /dev/null
local base_atime; base_atime=$(get_atime "${TEST_DIR}")
local base_ctime; base_ctime=$(get_ctime "${TEST_DIR}")
@ -2379,8 +2379,8 @@ function test_not_existed_dir_obj() {
#
local OBJECT_NAME_1; OBJECT_NAME_1="${DIR_NAME}/not_existed_dir_single/${TEST_TEXT_FILE}"
local OBJECT_NAME_2; OBJECT_NAME_2="${DIR_NAME}/not_existed_dir_parent/not_existed_dir_child/${TEST_TEXT_FILE}"
echo data1 | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME_1}"
echo data2 | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME_2}"
echo data1 | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME_1}"
echo data2 | s3_cp "${TEST_BUCKET_1}/${OBJECT_NAME_2}"
# Top directory
# shellcheck disable=SC2010
@ -2744,7 +2744,7 @@ function test_file_names_longer_than_posix() {
fi
rm -f "${a256}"
echo data | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${DIR_NAME}/${a256}"
echo data | s3_cp "${TEST_BUCKET_1}/${DIR_NAME}/${a256}"
files=(*)
if [ "${#files[@]}" = 0 ]; then
echo "failed to list long file name"

View File

@ -69,8 +69,8 @@ fi
start_s3proxy
if ! aws_cli s3api head-bucket --bucket "${TEST_BUCKET_1}" --region "${S3_ENDPOINT}"; then
aws_cli s3 mb "s3://${TEST_BUCKET_1}" --region "${S3_ENDPOINT}"
if ! s3_head "${TEST_BUCKET_1}"; then
s3_mb "${TEST_BUCKET_1}"
fi
for flag in "${FLAGS[@]}"; do

View File

@ -363,7 +363,10 @@ function get_user_and_group() {
function check_content_type() {
local INFO_STR
INFO_STR=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "$1" | jq -r .ContentType)
TEMPNAME="$(mktemp)"
s3_head "${TEST_BUCKET_1}/$1" --dump-header "$TEMPNAME"
INFO_STR=$(sed -n 's/^Content-Type: //pi' "$TEMPNAME" | tr -d '\r\n')
rm -f "$TEMPNAME"
if [ "${INFO_STR}" != "$2" ]
then
echo "Expected Content-Type: $2 but got: ${INFO_STR}"
@ -377,27 +380,34 @@ function get_disk_avail_size() {
echo "${DISK_AVAIL_SIZE}"
}
function aws_cli() {
local FLAGS=""
if [ -n "${S3FS_PROFILE}" ]; then
FLAGS="--profile ${S3FS_PROFILE}"
fi
# Issue a signed HEAD request against an object or bucket path.
#   $1   - "bucket" or "bucket/key", appended to ${S3_URL}
#   $2.. - extra curl options (e.g. --dump-header FILE)
# Returns curl's exit status; --fail turns HTTP >= 400 into a failure.
function s3_head() {
    local target_path="$1"
    shift
    local -a curl_opts=(
        --aws-sigv4 "aws:amz:${S3_ENDPOINT}:s3"
        --user "${AWS_ACCESS_KEY_ID}:${AWS_SECRET_ACCESS_KEY}"
        --cacert "${S3PROXY_CACERT_FILE}"
        --fail
        --silent
    )
    curl "${curl_opts[@]}" "$@" --head "${S3_URL}/${target_path}"
}
if [ "$1" = "s3" ] && [ "$2" != "ls" ] && [ "$2" != "mb" ]; then
if s3fs_args | grep -q use_sse=custom; then
FLAGS="${FLAGS} --sse-c AES256 --sse-c-key fileb:///tmp/ssekey.bin"
fi
elif [ "$1" = "s3api" ] && [ "$2" != "head-bucket" ]; then
if s3fs_args | grep -q use_sse=custom; then
FLAGS="${FLAGS} --sse-customer-algorithm AES256 --sse-customer-key $(cat /tmp/ssekey) --sse-customer-key-md5 $(cat /tmp/ssekeymd5)"
fi
fi
# Create ("make") a bucket by issuing a signed PUT on the bucket URL.
#   $1 - bucket name, appended to ${S3_URL}
# Returns curl's exit status; --fail turns HTTP >= 400 into a failure.
function s3_mb() {
    local bucket_name="$1"
    curl --request PUT \
         --aws-sigv4 "aws:amz:${S3_ENDPOINT}:s3" \
         --user "${AWS_ACCESS_KEY_ID}:${AWS_SECRET_ACCESS_KEY}" \
         --cacert "${S3PROXY_CACERT_FILE}" \
         --fail --silent \
         "${S3_URL}/${bucket_name}"
}
# [NOTE]
# AWS_EC2_METADATA_DISABLED for preventing the metadata service(to 169.254.169.254).
# shellcheck disable=SC2086,SC2068
# TODO: disable checksums to work around https://github.com/gaul/s3proxy/issues/760
AWS_EC2_METADATA_DISABLED=true AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED aws $@ --endpoint-url "${S3_URL}" --ca-bundle /tmp/keystore.pem ${FLAGS}
# Upload stdin as an object via a signed PUT.
#   $1   - "bucket/key" (a trailing "/" creates a directory marker), appended to ${S3_URL}
#   $2.. - extra curl options (e.g. --header "x-amz-meta-...: ...")
# Reads the object body from stdin; returns curl's exit status.
function s3_cp() {
    local S3_PATH=$1
    shift
    # Spool stdin to a temp file so an exact Content-Length can be sent.
    # TODO: use filenames instead of stdin?
    local TEMPNAME
    TEMPNAME="$(mktemp)" || return 1
    cat > "$TEMPNAME"
    # Capture curl's status so cleanup does not mask an upload failure
    # (a trailing "rm -f" would otherwise make this function always return 0).
    # tr strips the leading padding BSD/macOS wc emits in its byte count.
    local rv=0
    curl --aws-sigv4 "aws:amz:$S3_ENDPOINT:s3" --user "$AWS_ACCESS_KEY_ID:$AWS_SECRET_ACCESS_KEY" \
         --cacert "$S3PROXY_CACERT_FILE" --fail --silent \
         --header "Content-Length: $(wc -c < "$TEMPNAME" | tr -d ' ')" \
         "$@" \
         --request PUT --data-binary "@$TEMPNAME" "$S3_URL/$S3_PATH" || rv=$?
    rm -f "$TEMPNAME"
    return "$rv"
}
function wait_for_port() {