Reverted the macOS CI process (using macos-fuse-t)
ggtakec committed Oct 18, 2023
1 parent 2871975 commit 98991cc
Showing 11 changed files with 317 additions and 132 deletions.
32 changes: 12 additions & 20 deletions .github/workflows/ci.yml
@@ -127,18 +127,15 @@ jobs:
make ALL_TESTS=1 check -C test || (test/filter-suite-log.sh test/test-suite.log; exit 1)
# [NOTE]
# This Job does not work for macOS 11 and later because load_osxfuse returns exit code = 1.
# Apple states "You must be signed in as an administrator user to install new kernel
# extensions, and your Mac must be rebooted for the extensions to load.", so it needs
# to reboot OS.
# As of May 2023, GitHub Actions can no longer launch macOS 10.15 runners,
# so we cannot run this Job.
# In the future, if a solution is found, we will resume running this Job.
# Using macos-fuse-t
#   This package is a workaround for osxfuse, which requires an OS reboot on macOS 11 and later.
#   See https://github.com/macos-fuse-t/fuse-t
# About osxfuse
#   This job doesn't work with GitHub Actions on macOS 11+ because "load_osxfuse" returns
#   exit code 1 (loading the kernel extension requires an OS reboot).
#
macos10:
if: false

runs-on: macos-10.15
macos12:
runs-on: macos-12

steps:
- name: Checkout source code
@@ -149,14 +146,15 @@ jobs:
TAPS="$(brew --repository)/Library/Taps";
if [ -e "$TAPS/caskroom/homebrew-cask" ]; then rm -rf "$TAPS/caskroom/homebrew-cask"; fi;
HOMEBREW_NO_AUTO_UPDATE=1 brew tap homebrew/homebrew-cask
HOMEBREW_NO_AUTO_UPDATE=1 brew tap macos-fuse-t/homebrew-cask
- name: Install osxfuse
- name: Install fuse-t
run: |
HOMEBREW_NO_AUTO_UPDATE=1 brew install osxfuse
HOMEBREW_NO_AUTO_UPDATE=1 brew install fuse-t
- name: Install brew other packages
run: |
S3FS_BREW_PACKAGES='automake cppcheck python3 coreutils gnu-sed shellcheck';
S3FS_BREW_PACKAGES='automake cppcheck python3 coreutils gnu-sed shellcheck jq';
for s3fs_brew_pkg in ${S3FS_BREW_PACKAGES}; do if brew list | grep -q ${s3fs_brew_pkg}; then if brew outdated | grep -q ${s3fs_brew_pkg}; then HOMEBREW_NO_AUTO_UPDATE=1 brew upgrade ${s3fs_brew_pkg}; fi; else HOMEBREW_NO_AUTO_UPDATE=1 brew install ${s3fs_brew_pkg}; fi; done;
- name: Install awscli2
@@ -165,10 +163,6 @@
curl "https://awscli.amazonaws.com/AWSCLIV2.pkg" -o "AWSCLIV2.pkg"
sudo installer -pkg AWSCLIV2.pkg -target /
- name: Check osxfuse permission
run: |
if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then sudo chmod +s /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then sudo chmod +s /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse; else exit 1; fi
- name: Build
run: |
./autogen.sh
@@ -191,8 +185,6 @@ jobs:
- name: Test suite
run: |
make check -C src
echo "user_allow_other" | sudo tee -a /etc/fuse.conf >/dev/null
if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse; else exit 1; fi
make ALL_TESTS=1 check -C test || (test/filter-suite-log.sh test/test-suite.log; exit 1)
MemoryTest:
24 changes: 13 additions & 11 deletions configure.ac
@@ -39,7 +39,6 @@ CXXFLAGS="-Wall -fno-exceptions -D_FILE_OFFSET_BITS=64 -D_FORTIFY_SOURCE=3 -std=
dnl ----------------------------------------------
dnl For macOS
dnl ----------------------------------------------
found_fuse_t=no
case "$target" in
*-cygwin* )
# Do something specific for windows using winfsp
@@ -58,10 +57,17 @@ case "$target" in
;;
esac

dnl ----------------------------------------------
dnl Checking the FUSE library
dnl ----------------------------------------------
dnl Distinguish between Linux (libfuse) and macOS (FUSE-T).
dnl
found_fuse_t=no
PKG_CHECK_MODULES([FUSE_T], [fuse-t >= ${min_fuse_t_version}], [found_fuse_t=yes], [found_fuse_t=no])

AS_IF([test "x$found_fuse_t" = "xyes"],
[PKG_CHECK_MODULES([common_lib_checking], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 ])])
AS_IF([test "$found_fuse_t" = "yes"],
[PKG_CHECK_MODULES([fuse_library_checking], [fuse-t >= ${min_fuse_t_version}])],
[PKG_CHECK_MODULES([fuse_library_checking], [fuse >= ${min_fuse_version}])])

dnl ----------------------------------------------
dnl Choice SSL library
@@ -188,17 +194,13 @@ AS_IF(

dnl
dnl For PKG_CONFIG before checking nss/gnutls.
dnl This is a redundant check, but it is needed before the following checks.
dnl
AS_IF([test "x$found_fuse_t" = "xyes"],
[PKG_CHECK_MODULES([common_lib_checking], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 ])],
[PKG_CHECK_MODULES([common_lib_checking], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 ])])

AC_MSG_CHECKING([compile s3fs with])
case "${auth_lib}" in
openssl)
AC_MSG_RESULT(OpenSSL)
AS_IF([test "x$found_fuse_t" = "xyes"],
AS_IF([test "$found_fuse_t" = "yes"],
[PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])],
[PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])])

@@ -218,7 +220,7 @@ gnutls)
AS_IF([test "$gnutls_nettle" = ""], [AC_CHECK_LIB(gcrypt, gcry_control, [gnutls_nettle=0])])
AS_IF([test $gnutls_nettle = 0],
[
AS_IF([test "x$found_fuse_t" = "xyes"],
AS_IF([test "$found_fuse_t" = "yes"],
[PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])],
[PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])])
LIBS="-lgnutls -lgcrypt $LIBS"
@@ -234,7 +236,7 @@ nettle)
AS_IF([test "$gnutls_nettle" = ""], [AC_CHECK_LIB(nettle, nettle_MD5Init, [gnutls_nettle=1])])
AS_IF([test $gnutls_nettle = 1],
[
AS_IF([test "x$found_fuse_t" = "xyes"],
AS_IF([test "$found_fuse_t" = "yes"],
[PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])],
[PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])])
LIBS="-lgnutls -lnettle $LIBS"
@@ -245,7 +247,7 @@
;;
nss)
AC_MSG_RESULT(NSS)
AS_IF([test "x$found_fuse_t" = "xyes"],
AS_IF([test "$found_fuse_t" = "yes"],
[PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])],
[PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])])
;;
6 changes: 3 additions & 3 deletions src/curl_multi.cpp
@@ -33,7 +33,7 @@
//-------------------------------------------------------------------
// Class S3fsMultiCurl
//-------------------------------------------------------------------
S3fsMultiCurl::S3fsMultiCurl(int maxParallelism) : maxParallelism(maxParallelism), SuccessCallback(nullptr), NotFoundCallback(nullptr), RetryCallback(nullptr), pSuccessCallbackParam(nullptr), pNotFoundCallbackParam(nullptr)
S3fsMultiCurl::S3fsMultiCurl(int maxParallelism, bool not_abort) : maxParallelism(maxParallelism), not_abort(not_abort), SuccessCallback(nullptr), NotFoundCallback(nullptr), RetryCallback(nullptr), pSuccessCallbackParam(nullptr), pNotFoundCallbackParam(nullptr)
{
int result;
pthread_mutexattr_t attr;
@@ -276,7 +276,7 @@ int S3fsMultiCurl::MultiRead()
clist_req.push_back(std::move(s3fscurl)); // Re-evaluate at the end
iter = clist_req.begin();
}else{
if(!isRetry || 0 != result){
if(!isRetry || (!not_abort && 0 != result)){
// If an EIO error has already occurred, it will be terminated
// immediately even if retry processing is required.
s3fscurl->DestroyCurlHandle();
@@ -308,7 +308,7 @@ int S3fsMultiCurl::MultiRead()
}
clist_req.clear();

if(0 != result){
if(!not_abort && 0 != result){
// If an EIO error has already occurred, clear all retry objects.
for(s3fscurllist_t::iterator iter = clist_all.begin(); iter != clist_all.end(); ++iter){
S3fsCurl* s3fscurl = iter->get();
3 changes: 2 additions & 1 deletion src/curl_multi.h
@@ -44,6 +44,7 @@ class S3fsMultiCurl

s3fscurllist_t clist_all; // all of curl requests
s3fscurllist_t clist_req; // curl requests are sent
bool not_abort; // complete all requests without aborting on errors

S3fsMultiSuccessCallback SuccessCallback;
S3fsMultiNotFoundCallback NotFoundCallback;
@@ -62,7 +63,7 @@
static void* RequestPerformWrapper(void* arg);

public:
explicit S3fsMultiCurl(int maxParallelism);
explicit S3fsMultiCurl(int maxParallelism, bool not_abort = false);
~S3fsMultiCurl();

int GetMaxParallelism() const { return maxParallelism; }
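The new not_abort flag changes how S3fsMultiCurl::MultiRead() treats failures: with the old behavior (not_abort == false, still the default), the first hard error destroys the current handle and clears all queued retry objects, while not_abort == true drives every request (and its retries) to completion and only reports the first error at the end. The standalone sketch below models that control flow; run_batch and its toy requests are hypothetical, not s3fs code, and only the not_abort branching mirrors the hunks above.

    #include <functional>
    #include <iostream>
    #include <vector>

    // Toy model of S3fsMultiCurl::MultiRead()'s error handling (hypothetical
    // helper; only the not_abort control flow mirrors the real code).
    static int run_batch(const std::vector<std::function<int()>>& requests, bool not_abort)
    {
        int result = 0;
        for(const auto& req : requests){
            int rc = req();
            if(0 != rc && 0 == result){
                result = rc;              // remember only the first error
            }
            if(!not_abort && 0 != result){
                break;                    // old behavior: abort the remaining requests
            }
            // not_abort == true: keep going and finish every request
        }
        return result;                    // first error, or 0 if everything succeeded
    }

    int main()
    {
        std::vector<std::function<int()>> reqs = {
            []{ return 0; },              // e.g. a HEAD request that succeeds
            []{ return -5; },             // one that fails (say, -EIO)
            []{ return 0; },
        };
        std::cout << "abort on first error: " << run_batch(reqs, false) << '\n';  // third request never runs
        std::cout << "run to completion:    " << run_batch(reqs, true)  << '\n';  // same result, all three run
        return 0;
    }

readdir_multi_head() in src/s3fs.cpp (below) passes true here, so a single failed HEAD request no longer aborts the whole directory listing.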
25 changes: 23 additions & 2 deletions src/fdcache_entity.cpp
@@ -1737,7 +1737,7 @@ int FdEntity::RowFlushMixMultipart(PseudoFdInfo* pseudo_obj, const char* tpath)
return result;
}
untreated_list.ClearParts(untreated_start, untreated_size);
}
// complete multipart uploading.
if(0 != (result = NoCacheCompleteMultipartPost(pseudo_obj))){
S3FS_PRN_ERR("failed to complete(finish) multipart post for file(physical_fd=%d).", physical_fd);
@@ -1812,7 +1812,8 @@ int FdEntity::RowFlushStreamMultipart(PseudoFdInfo* pseudo_obj, const char* tpat
mp_part_list_t to_copy_list;
mp_part_list_t to_download_list;
filepart_list_t cancel_uploaded_list;
if(!pseudo_obj->ExtractUploadPartsFromAllArea(untreated_list, to_upload_list, to_copy_list, to_download_list, cancel_uploaded_list, S3fsCurl::GetMultipartSize(), pagelist.Size(), FdEntity::mixmultipart)){
bool wait_upload_complete = false;
if(!pseudo_obj->ExtractUploadPartsFromAllArea(untreated_list, to_upload_list, to_copy_list, to_download_list, cancel_uploaded_list, wait_upload_complete, S3fsCurl::GetMultipartSize(), pagelist.Size(), FdEntity::mixmultipart)){
S3FS_PRN_ERR("Failed to extract various upload parts list from all area: errno(EIO)");
return -EIO;
}
@@ -1888,6 +1889,26 @@ int FdEntity::RowFlushStreamMultipart(PseudoFdInfo* pseudo_obj, const char* tpat
}
}

// [NOTE]
// If a part is still being uploaded, that part is updated again
// after the in-flight upload finishes, so the previously uploaded
// part must be canceled.
// (Those parts are collected in cancel_uploaded_list; cancellation
// here means re-uploading the same area.)
//
// In rare cases, the completion of the previous upload and of the
// re-upload can be reversed, swapping the ETags and causing the
// upload to fail.
// To prevent this, if an upload of the same area as a re-upload is
// still incomplete, we must wait here for it to complete.
//
if(wait_upload_complete){
if(0 != (result = pseudo_obj->WaitAllThreadsExit())){
S3FS_PRN_ERR("Some cancel area uploads that were waiting to complete failed with %d.", result);
return result;
}
}

//
// Upload multipart and copy parts and wait exiting them
//
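The [NOTE] above guards against a rare race: a canceled part may still be uploading on a worker thread, and if that late upload completes after the re-upload of the same range, the part keeps the stale ETag and the multipart completion fails. Waiting via WaitAllThreadsExit() whenever wait_upload_complete is set restores the ordering. The self-contained sketch below shows why such a wait works; the names (PartSlot, first_upload, re_upload) are hypothetical, and it uses a condition variable where the real code joins its upload threads.

    #include <condition_variable>
    #include <mutex>
    #include <string>
    #include <thread>

    // Hypothetical illustration (not s3fs code): the re-upload of a canceled
    // part must wait for the in-flight upload of the same byte range.
    struct PartSlot {
        std::mutex              mtx;
        std::condition_variable cv;
        bool                    uploaded = false;  // like filepart::uploaded
        std::string             etag;              // last ETag the server recorded
    };

    void first_upload(PartSlot& slot)
    {
        // ... PUT the original data for this part ...
        std::lock_guard<std::mutex> lk(slot.mtx);
        slot.etag     = "etag-v1";
        slot.uploaded = true;
        slot.cv.notify_all();
    }

    void re_upload(PartSlot& slot)
    {
        // Equivalent of the wait_upload_complete handling: without this wait,
        // the re-upload could finish first, and the late first upload would
        // then overwrite the part with stale data ("etag-v1").
        std::unique_lock<std::mutex> lk(slot.mtx);
        slot.cv.wait(lk, [&slot]{ return slot.uploaded; });
        slot.etag = "etag-v2";  // now guaranteed to be the final ETag for this part
    }

    int main()
    {
        PartSlot slot;
        std::thread t1(first_upload, std::ref(slot));
        std::thread t2(re_upload,    std::ref(slot));
        t1.join();
        t2.join();
        // slot.etag == "etag-v2" deterministically, because of the wait.
        return 0;
    }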
11 changes: 9 additions & 2 deletions src/fdcache_fdinfo.cpp
@@ -763,15 +763,16 @@ bool PseudoFdInfo::ExtractUploadPartsFromUntreatedArea(off_t& untreated_start, o
// to_upload_list : A list of areas to upload in multipart upload.
// to_copy_list : A list of areas for copy upload in multipart upload.
// to_download_list : A list of areas that must be downloaded before multipart upload.
//  cancel_upload_list   : A list of areas that have already been uploaded and will be canceled (overwritten).
//  wait_upload_complete : If cancellation areas exist, this flag is set to true when it is necessary to wait until the uploads of those canceled areas complete.
// file_size : The size of the upload file.
// use_copy : Specify true if copy multipart upload is available.
//
// [NOTE]
// The untreated_list in fdentity does not change, but upload_list is changed.
// (If you want to restore it, you can use cancel_upload_list.)
//
bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, mp_part_list_t& to_upload_list, mp_part_list_t& to_copy_list, mp_part_list_t& to_download_list, filepart_list_t& cancel_upload_list, off_t max_mp_size, off_t file_size, bool use_copy)
bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, mp_part_list_t& to_upload_list, mp_part_list_t& to_copy_list, mp_part_list_t& to_download_list, filepart_list_t& cancel_upload_list, bool& wait_upload_complete, off_t max_mp_size, off_t file_size, bool use_copy)
{
AutoLock auto_lock(&upload_list_lock);

@@ -780,6 +781,7 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
to_copy_list.clear();
to_download_list.clear();
cancel_upload_list.clear();
wait_upload_complete = false;

// Duplicate untreated list
untreated_list_t dup_untreated_list;
@@ -939,6 +941,11 @@ bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list,
// So this current area only needs to be uploaded again.
//
S3FS_PRN_DBG("Cancel upload: start=%lld, size=%lld", static_cast<long long int>(overlap_uploaded_iter->startpos), static_cast<long long int>(overlap_uploaded_iter->size));

if(!overlap_uploaded_iter->uploaded){
S3FS_PRN_DBG("This cancel upload area is still uploading, so you must wait for it to complete before starting any Stream uploads.");
wait_upload_complete = true;
}
cancel_upload_list.push_back(*overlap_uploaded_iter); // add this uploaded area to cancel_upload_list
uploaded_iter = upload_list.erase(overlap_uploaded_iter); // remove it from upload_list

4 changes: 2 additions & 2 deletions src/fdcache_fdinfo.h
@@ -85,7 +85,6 @@ class PseudoFdInfo
bool CompleteInstruction(int result, AutoLock::Type type = AutoLock::NONE);
bool ParallelMultipartUpload(const char* path, const mp_part_list_t& mplist, bool is_copy, AutoLock::Type type = AutoLock::NONE);
bool InsertUploadPart(off_t start, off_t size, int part_num, bool is_copy, etagpair** ppetag, AutoLock::Type type = AutoLock::NONE);
int WaitAllThreadsExit();
bool CancelAllThreads();
bool ExtractUploadPartsFromUntreatedArea(off_t& untreated_start, off_t& untreated_size, mp_part_list_t& to_upload_list, filepart_list_t& cancel_upload_list, off_t max_mp_size);

@@ -115,8 +114,9 @@ class PseudoFdInfo

bool ParallelMultipartUploadAll(const char* path, const mp_part_list_t& to_upload_list, const mp_part_list_t& copy_list, int& result);

int WaitAllThreadsExit();
ssize_t UploadBoundaryLastUntreatedArea(const char* path, headers_t& meta, FdEntity* pfdent);
bool ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, mp_part_list_t& to_upload_list, mp_part_list_t& to_copy_list, mp_part_list_t& to_download_list, filepart_list_t& cancel_upload_list, off_t max_mp_size, off_t file_size, bool use_copy);
bool ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, mp_part_list_t& to_upload_list, mp_part_list_t& to_copy_list, mp_part_list_t& to_download_list, filepart_list_t& cancel_upload_list, bool& wait_upload_complete, off_t max_mp_size, off_t file_size, bool use_copy);
};

typedef std::map<int, std::unique_ptr<PseudoFdInfo>> fdinfo_map_t;
10 changes: 9 additions & 1 deletion src/s3fs.cpp
@@ -1035,7 +1035,11 @@ static int s3fs_getattr(const char* _path, struct stat* stbuf)
WTF8_ENCODE(path)
int result;

#if defined(__APPLE__)
FUSE_CTX_DBG("[path=%s]", path);
#else
FUSE_CTX_INFO("[path=%s]", path);
#endif

// check parent directory attribute.
if(0 != (result = check_parent_object_access(path, X_OK))){
@@ -3255,7 +3259,7 @@ static std::unique_ptr<S3fsCurl> multi_head_retry_callback(S3fsCurl* s3fscurl)

static int readdir_multi_head(const char* path, const S3ObjList& head, void* buf, fuse_fill_dir_t filler)
{
S3fsMultiCurl curlmulti(S3fsCurl::GetMaxMultiRequest());
S3fsMultiCurl curlmulti(S3fsCurl::GetMaxMultiRequest(), true);  // [NOTE] Run all requests to completion even if some of them fail.
s3obj_list_t headlist;
int result = 0;

@@ -3956,7 +3960,11 @@ static int s3fs_getxattr(const char* path, const char* name, char* value, size_t
static int s3fs_getxattr(const char* path, const char* name, char* value, size_t size)
#endif
{
#if defined(__APPLE__)
FUSE_CTX_DBG("[path=%s][name=%s][value=%p][size=%zu]", path, name, value, size);
#else
FUSE_CTX_INFO("[path=%s][name=%s][value=%p][size=%zu]", path, name, value, size);
#endif

if(!path || !name){
return -EIO;
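Both s3fs.cpp hunks above apply the same pattern: on macOS, the per-call context log in s3fs_getattr() and s3fs_getxattr() drops from INFO to DBG, presumably because FUSE-T's NFS-based design drives these callbacks far more often and INFO-level output would flood the log. As a sketch, the repeated #if/#else could be folded into a single helper macro; FUSE_CTX_TRACE and the printf stand-ins below are assumptions for illustration only, not part of s3fs.

    #include <cstdio>

    // Stand-ins for s3fs's real logging macros, only so this sketch compiles.
    #define FUSE_CTX_INFO(fmt, ...) std::printf("[INF] " fmt "\n", ##__VA_ARGS__)
    #define FUSE_CTX_DBG(fmt, ...)  std::printf("[DBG] " fmt "\n", ##__VA_ARGS__)

    // Hypothetical helper (assumption, not in s3fs): log the same message at
    // DBG level on macOS and at INFO level elsewhere, matching the #if/#else
    // pattern this commit adds to s3fs_getattr() and s3fs_getxattr().
    #if defined(__APPLE__)
    #define FUSE_CTX_TRACE(fmt, ...) FUSE_CTX_DBG(fmt, ##__VA_ARGS__)
    #else
    #define FUSE_CTX_TRACE(fmt, ...) FUSE_CTX_INFO(fmt, ##__VA_ARGS__)
    #endif

    int main()
    {
        const char* path = "/some/object";
        FUSE_CTX_TRACE("[path=%s]", path);  // "[DBG] ..." on macOS, "[INF] ..." elsewhere
        return 0;
    }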
28 changes: 18 additions & 10 deletions test/integration-test-common.sh
@@ -229,8 +229,16 @@ function start_s3fs {
fi

# On macOS only, we need to specify the direct_io and auto_cache flags.
#
# We also turn off creation and use of the Spotlight index.
# (Leaving Spotlight ON results in many wasted requests, which
# affects test execution time.)
#
if [ "$(uname)" = "Darwin" ]; then
local DIRECT_IO_OPT="-o direct_io -o auto_cache"

# Disable Spotlight indexing on all volumes
sudo mdutil -a -i off
else
local DIRECT_IO_OPT=""
fi
@@ -248,14 +256,13 @@
fi

# [NOTE]
# On macOS, running s3fs via stdbuf results in no response.
# Therefore, on macOS it is not executed via stdbuf.
# This patch may be temporary, but no other method has been found at this time.
# For macOS fuse-t, we need to specify the "noattrcache" option to
# disable NFS attribute caching.
#
if [ "$(uname)" = "Darwin" ]; then
local VIA_STDBUF_CMDLINE=""
local FUSE_T_ATTRCACHE_OPT="-o noattrcache"
else
local VIA_STDBUF_CMDLINE="${STDBUF_BIN} -oL -eL"
local FUSE_T_ATTRCACHE_OPT=""
fi

# [NOTE]
@@ -289,7 +296,7 @@ function start_s3fs {
(
set -x
CURL_CA_BUNDLE="${S3PROXY_CACERT_FILE}" \
${VIA_STDBUF_CMDLINE} \
${STDBUF_BIN} -oL -eL \
${VALGRIND_EXEC} \
${S3FS} \
${TEST_BUCKET_1} \
Expand All @@ -303,6 +310,7 @@ function start_s3fs {
${DIRECT_IO_OPT} \
${S3FS_HTTP_PROXY_OPT} \
${NO_CHECK_CERT_OPT} \
${FUSE_T_ATTRCACHE_OPT} \
-o stat_cache_expire=1 \
-o stat_cache_interval_expire=1 \
-o dbglevel="${DBGLEVEL:=info}" \
@@ -320,15 +328,15 @@
if [ "$(uname)" = "Darwin" ]; then
local TRYCOUNT=0
while [ "${TRYCOUNT}" -le "${RETRIES:=20}" ]; do
df | grep -q "${TEST_BUCKET_MOUNT_POINT_1}"
rc=$?
if [ "${rc}" -eq 0 ]; then
_DF_RESULT=$(df 2>/dev/null)
if echo "${_DF_RESULT}" | grep -q "${TEST_BUCKET_MOUNT_POINT_1}"; then
break;
fi
sleep 1
TRYCOUNT=$((TRYCOUNT + 1))
done
if [ "${rc}" -ne 0 ]; then
if [ "${TRYCOUNT}" -gt "${RETRIES}" ]; then
echo "Waited ${TRYCOUNT} seconds, but it could not be mounted."
exit 1
fi
else