diff --git a/lru.h b/lru.h
index c56bdaa..1017054 100644
--- a/lru.h
+++ b/lru.h
@@ -37,10 +37,8 @@ www.navitia.io
 
 #include
 #include
 #include
-#include <boost/thread/shared_mutex.hpp>
 #include
-#include <boost/thread/locks.hpp>
 #include
 #include
 
@@ -149,10 +147,10 @@ struct ConcurrentLru {
         }
     };
     Lru<SharedPtrF> lru;
-    std::unique_ptr<boost::shared_mutex> mutex{std::make_unique<boost::shared_mutex>()};
+    std::unique_ptr<std::mutex> mutex{std::make_unique<std::mutex>()};
 
     std::vector<typename Lru<SharedPtrF>::key_type> keys() const {
-        boost::shared_lock<boost::shared_mutex> lock(*mutex);
+        std::lock_guard<std::mutex> lock(*mutex);
         return lru.keys();
     }
 
@@ -166,7 +164,7 @@ struct ConcurrentLru {
     result_type operator()(argument_type arg) const {
         typename SharedPtrF::result_type future;
         {
-            std::lock_guard<boost::shared_mutex> lock(*mutex);
+            std::lock_guard<std::mutex> lock(*mutex);
             future = lru(arg);
         }
         // As arg might be a reference, the maybe newly created future must be run
@@ -178,19 +176,17 @@ struct ConcurrentLru {
     // Without that a race condition could occurs if operator()
     // or warmup() functions are used by a thread and get_nb_** functions by another thread
     // in the same time
-    // Also we prefere to use shared_lock/shared_mutex to allow
-    // multiple reader and one writer in MT context
     size_t get_nb_cache_miss() const {
-        std::shared_lock<boost::shared_mutex> lock(*mutex);
+        std::lock_guard<std::mutex> lock(*mutex);
         return lru.get_nb_cache_miss();
     }
 
     size_t get_nb_calls() const {
-        std::shared_lock<boost::shared_mutex> lock(*mutex);
+        std::lock_guard<std::mutex> lock(*mutex);
         return lru.get_nb_calls();
     }
 
     size_t get_max_size() const {
-        std::shared_lock<boost::shared_mutex> lock(*mutex);
+        std::lock_guard<std::mutex> lock(*mutex);
        return lru.get_max_size();
     }