Removed CurlHandlerPool and simplified it
Removed CurlHandlerPool and simplified the cURL handle handling.
To work around a data race in OpenSSL, temporarily changed s3fs not to use the SSL session cache.
ggtakec committed Nov 9, 2024
1 parent ef6c213 commit 0326447
Showing 9 changed files with 35 additions and 241 deletions.
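
Background on the second half of the commit message: the diff below shows that s3fs keeps a CURLSH share handle (hCurlShare), per-datum callback locks (including an ssl_session mutex), and an is_ssl_session_cache flag whose default now changes to false. The sketch that follows is a minimal illustration, not code from this repository, of how libcurl sharing of this kind is typically wired up; create_share, lock_cb, unlock_cb and share_locks are hypothetical names. "Not using the session cache" then simply means never passing CURL_LOCK_DATA_SSL_SESSION to curl_share_setopt(), so concurrent easy handles no longer touch a shared OpenSSL session cache.

```cpp
// Minimal sketch (assumed, not s3fs code) of sharing DNS and SSL session
// data between libcurl easy handles through a CURLSH handle.
#include <curl/curl.h>
#include <mutex>

// One mutex per shareable data kind, indexed by curl_lock_data.
static std::mutex share_locks[CURL_LOCK_DATA_LAST];

static void lock_cb(CURL*, curl_lock_data data, curl_lock_access, void*)
{
    share_locks[data].lock();
}

static void unlock_cb(CURL*, curl_lock_data data, void*)
{
    share_locks[data].unlock();
}

CURLSH* create_share(bool use_dns_cache, bool use_ssl_session_cache)
{
    CURLSH* share = curl_share_init();
    if(!share){
        return nullptr;
    }
    curl_share_setopt(share, CURLSHOPT_LOCKFUNC,   lock_cb);
    curl_share_setopt(share, CURLSHOPT_UNLOCKFUNC, unlock_cb);
    if(use_dns_cache){
        curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS);
    }
    if(use_ssl_session_cache){
        // The sharing this commit effectively disables by defaulting
        // is_ssl_session_cache to false: without it, each easy handle keeps
        // its own SSL sessions and no shared cache is accessed concurrently.
        curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_SSL_SESSION);
    }
    return share;
}
```

Each easy handle would then opt into the share with curl_easy_setopt(handle, CURLOPT_SHARE, share).
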
3 changes: 2 additions & 1 deletion .clang-tidy
@@ -16,7 +16,8 @@ Checks: '
-cert-env33-c,
-cert-err33-c,
-cert-err58-cpp,
-clang-analyzer-*,
-clang-analyzer-security.insecureAPI.strcpy,
-clang-analyzer-unix.BlockInCriticalSection,
-concurrency-mt-unsafe,
-cppcoreguidelines-avoid-c-arrays,
-cppcoreguidelines-avoid-do-while,
1 change: 0 additions & 1 deletion src/Makefile.am
@@ -36,7 +36,6 @@ s3fs_SOURCES = \
metaheader.cpp \
mpu_util.cpp \
curl.cpp \
curl_handlerpool.cpp \
curl_multi.cpp \
curl_util.cpp \
s3objlist.cpp \
45 changes: 12 additions & 33 deletions src/curl.cpp
@@ -38,7 +38,6 @@
#include "curl_multi.h"
#include "curl_util.h"
#include "s3fs_auth.h"
#include "curl_handlerpool.h"
#include "s3fs_cred.h"
#include "s3fs_util.h"
#include "string_util.h"
@@ -99,12 +98,10 @@ constexpr char S3fsCurl::S3FS_SSL_PRIVKEY_PASSWORD[];
std::mutex S3fsCurl::curl_handles_lock;
S3fsCurl::callback_locks_t S3fsCurl::callback_locks;
bool S3fsCurl::is_initglobal_done = false;
std::unique_ptr<CurlHandlerPool> S3fsCurl::sCurlPool;
int S3fsCurl::sCurlPoolSize = 32;
CURLSH* S3fsCurl::hCurlShare = nullptr;
bool S3fsCurl::is_cert_check = true; // default
bool S3fsCurl::is_dns_cache = true; // default
bool S3fsCurl::is_ssl_session_cache= true; // default
bool S3fsCurl::is_ssl_session_cache= false; // default (temporarily OFF; to be turned ON again in a later PR)
long S3fsCurl::connect_timeout = 300; // default
time_t S3fsCurl::readwrite_timeout = 120; // default
int S3fsCurl::retries = 5; // default
@@ -162,14 +159,6 @@ bool S3fsCurl::InitS3fsCurl()
if(!S3fsCurl::InitCryptMutex()){
return false;
}
// [NOTE]
// sCurlPoolSize must be over parallel(or multireq) count.
//
sCurlPoolSize = std::max({sCurlPoolSize, GetMaxParallelCount(), GetMaxMultiRequest()});
sCurlPool.reset(new CurlHandlerPool(sCurlPoolSize));
if (!sCurlPool->Init()) {
return false;
}
return true;
}

@@ -180,10 +169,6 @@ bool S3fsCurl::DestroyS3fsCurl()
if(!S3fsCurl::DestroyCryptMutex()){
result = false;
}
if(!sCurlPool->Destroy()){
result = false;
}
sCurlPool.reset();
if(!S3fsCurl::DestroyShareCurl()){
result = false;
}
@@ -1976,7 +1961,7 @@ bool S3fsCurl::ResetHandle()
{
bool run_once = curl_warnings_once.exchange(true);

sCurlPool->ResetHandler(hCurl.get());
curl_easy_reset(hCurl.get());

if(CURLE_OK != curl_easy_setopt(hCurl, CURLOPT_NOSIGNAL, 1)){
return false;
@@ -2099,43 +2084,37 @@ bool S3fsCurl::ResetHandle()
return true;
}

bool S3fsCurl::CreateCurlHandle(bool only_pool, bool remake)
bool S3fsCurl::CreateCurlHandle(bool remake)
{
const std::lock_guard<std::mutex> lock(S3fsCurl::curl_handles_lock);

if(hCurl && remake){
if(!DestroyCurlHandleHasLock(false, true)){
if(!DestroyCurlHandleHasLock(true)){
S3FS_PRN_ERR("could not destroy handle.");
return false;
}
S3FS_PRN_INFO3("already has handle, so destroyed it or restored it to pool.");
}

if(!hCurl){
if(nullptr == (hCurl = sCurlPool->GetHandler(only_pool))){
if(!only_pool){
S3FS_PRN_ERR("Failed to create handle.");
return false;
}else{
// [NOTE]
// Further initialization processing is left to lazy processing to be executed later.
// (Currently we do not use only_pool=true, but this code is remained for the future)
return true;
}
hCurl.reset(curl_easy_init());
if(!hCurl){
S3FS_PRN_ERR("Failed to create handle.");
return false;
}
}
ResetHandle();

return true;
}

bool S3fsCurl::DestroyCurlHandle(bool restore_pool, bool clear_internal_data)
bool S3fsCurl::DestroyCurlHandle(bool clear_internal_data)
{
const std::lock_guard<std::mutex> lock(S3fsCurl::curl_handles_lock);
return DestroyCurlHandleHasLock(restore_pool, clear_internal_data);
return DestroyCurlHandleHasLock(clear_internal_data);
}

bool S3fsCurl::DestroyCurlHandleHasLock(bool restore_pool, bool clear_internal_data)
bool S3fsCurl::DestroyCurlHandleHasLock(bool clear_internal_data)
{
// [NOTE]
// If type is REQTYPE::IAMCRED or REQTYPE::IAMROLE, do not clear type.
@@ -2152,7 +2131,7 @@ bool S3fsCurl::DestroyCurlHandleHasLock(bool restore_pool, bool clear_internal_d

if(hCurl){
S3fsCurl::curl_progress.erase(hCurl.get());
sCurlPool->ReturnHandler(std::move(hCurl), restore_pool);
hCurl.reset();
}else{
return false;
}
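
To make the curl.cpp hunks above easier to follow: handle acquisition, reuse and release now go directly through libcurl (curl_easy_init(), curl_easy_reset(), and the curl_easy_cleanup() deleter already bound into CurlUniquePtr) instead of through the removed CurlHandlerPool. Below is a minimal sketch of that lifecycle, assuming curl_global_init() has already been called; EasyHandle, make_handle, prepare_request and release_request are hypothetical names, not s3fs functions.

```cpp
// Sketch of the post-pool handle lifecycle: create, reset per request,
// destroy. Comments mark the pool calls each step replaces.
#include <curl/curl.h>
#include <memory>

using EasyHandle = std::unique_ptr<CURL, decltype(&curl_easy_cleanup)>;

static EasyHandle make_handle()
{
    return EasyHandle(curl_easy_init(), curl_easy_cleanup);
}

static bool prepare_request(EasyHandle& handle)
{
    if(!handle){
        handle = make_handle();            // was: sCurlPool->GetHandler()
        if(!handle){
            return false;                  // curl_easy_init() failed
        }
    }
    curl_easy_reset(handle.get());         // was: sCurlPool->ResetHandler()
    curl_easy_setopt(handle.get(), CURLOPT_NOSIGNAL, 1L);
    return true;
}

static void release_request(EasyHandle& handle)
{
    handle.reset();                        // was: sCurlPool->ReturnHandler()
}
```

The trade-off versus the pool is a small per-handle allocation cost in exchange for less shared state and locking.
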
9 changes: 3 additions & 6 deletions src/curl.h
@@ -84,7 +84,6 @@ typedef std::unique_ptr<CURL, decltype(&curl_easy_cleanup)> CurlUniquePtr;
//----------------------------------------------
// class S3fsCurl
//----------------------------------------------
class CurlHandlerPool;
class S3fsCred;
class S3fsCurl;

@@ -131,8 +130,6 @@ class S3fsCurl
std::mutex ssl_session;
} callback_locks;
static bool is_initglobal_done;
static std::unique_ptr<CurlHandlerPool> sCurlPool;
static int sCurlPoolSize;
static CURLSH* hCurlShare;
static bool is_cert_check;
static bool is_dns_cache;
@@ -363,9 +360,9 @@ class S3fsCurl
static bool SetIPResolveType(const char* value);

// methods
bool CreateCurlHandle(bool only_pool = false, bool remake = false);
bool DestroyCurlHandle(bool restore_pool = true, bool clear_internal_data = true);
bool DestroyCurlHandleHasLock(bool restore_pool = true, bool clear_internal_data = true) REQUIRES(S3fsCurl::curl_handles_lock);
bool CreateCurlHandle(bool remake = false);
bool DestroyCurlHandle(bool clear_internal_data = true);
bool DestroyCurlHandleHasLock(bool clear_internal_data = true) REQUIRES(S3fsCurl::curl_handles_lock);

bool GetIAMCredentials(const char* cred_url, const char* iam_v2_token, const char* ibm_secret_access_key, std::string& response);
bool GetIAMRoleFromMetaData(const char* cred_url, const char* iam_v2_token, std::string& token);
114 changes: 0 additions & 114 deletions src/curl_handlerpool.cpp

This file was deleted.

74 changes: 0 additions & 74 deletions src/curl_handlerpool.h

This file was deleted.

19 changes: 17 additions & 2 deletions src/curl_multi.cpp
@@ -165,7 +165,22 @@ int S3fsMultiCurl::MultiPerform()
auto it = threads.find(thread_id);
it->second.first.join();
auto int_retval = it->second.second.get();
if (int_retval && !(int_retval == -ENOENT && isMultiHead)) {

// [NOTE]
// For parallel HEAD requests (ex. readdir), we need to tolerate the cases
// where ENOENT or EPERM is returned.
// For example, ENOENT is returned when sending a HEAD request for a
// directory object whose path contains a directory that was uploaded by a
// client other than s3fs.
// EPERM is returned when trying to read an encrypted object (ex. SSE-KMS)
// without specifying SSE or with a different KMS key.
// In these cases, if this method ended with such an error as its result,
// the caller could not continue processing (readdir would fail).
// Therefore, when these conditions are met, the error is not stored in
// result.
//
if(int_retval && (!isMultiHead || (-ENOENT != int_retval && -EPERM != int_retval))){
S3FS_PRN_WARN("thread terminated with non-zero return code: %d", int_retval);
result = int_retval;
}
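
The condition above can be read as a small predicate: a per-thread error is written into result unless the run is a HEAD fan-out and the error is one of the tolerated codes described in the comment. A sketch of that rule follows; should_propagate is a hypothetical helper, not part of s3fs.

```cpp
// Sketch of the error-propagation rule used in MultiPerform() above.
#include <cerrno>

static bool should_propagate(int retval, bool is_multi_head)
{
    if(0 == retval){
        return false;                       // success, nothing to report
    }
    if(is_multi_head && (-ENOENT == retval || -EPERM == retval)){
        return false;                       // tolerated so readdir can continue
    }
    return true;                            // real error: keep it in result
}
```
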
@@ -351,7 +366,7 @@ void S3fsMultiCurl::RequestPerformWrapper(S3fsCurl* s3fscurl, std::promise<int>

if(!result){
result = s3fscurl->RequestPerform();
s3fscurl->DestroyCurlHandle(true, false);
s3fscurl->DestroyCurlHandle(false);
}

const std::lock_guard<std::mutex> lock(*s3fscurl->completed_tids_lock);