diff --git a/src/future/base_cache.rs b/src/future/base_cache.rs
index 2b5e5bf9..ccbfc982 100644
--- a/src/future/base_cache.rs
+++ b/src/future/base_cache.rs
@@ -479,7 +479,6 @@ where
     ) -> (WriteOp, Instant) {
         self.retry_interrupted_ops().await;
 
-        let ts = self.current_time_from_expiration_clock();
         let weight = self.inner.weigh(&key, &value);
         let op_cnt1 = Arc::new(AtomicU8::new(0));
         let op_cnt2 = Arc::clone(&op_cnt1);
@@ -494,6 +493,8 @@ where
             None
         };
 
+        let ts = self.current_time_from_expiration_clock();
+
         // Since the cache (cht::SegmentedHashMap) employs optimistic locking
         // strategy, insert_with_or_modify() may get an insert/modify operation
         // conflicted with other concurrent hash table operations. In that case, it
diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs
index 5f39c43b..acfc7515 100644
--- a/src/sync_base/base_cache.rs
+++ b/src/sync_base/base_cache.rs
@@ -480,7 +480,6 @@ where
         hash: u64,
         value: V,
     ) -> (WriteOp, Instant) {
-        let ts = self.current_time_from_expiration_clock();
         let weight = self.inner.weigh(&key, &value);
         let op_cnt1 = Rc::new(AtomicU8::new(0));
         let op_cnt2 = Rc::clone(&op_cnt1);
@@ -491,6 +490,8 @@ where
         let kl = self.maybe_key_lock(&key);
         let _klg = &kl.as_ref().map(|kl| kl.lock());
 
+        let ts = self.current_time_from_expiration_clock();
+
         // Since the cache (cht::SegmentedHashMap) employs optimistic locking
         // strategy, insert_with_or_modify() may get an insert/modify operation
         // conflicted with other concurrent hash table operations. In that case, it
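
For context, a minimal standalone sketch of the ordering pattern this change appears to follow: the clock is sampled only after the per-key lock is held (and, in the async path, after interrupted ops are retried), so the timestamp attached to the write cannot predate a potentially long wait on that lock. All names below (Entry, insert_with_timestamp) are hypothetical and are not part of moka's API; this is an illustration of the pattern, not the crate's implementation.

use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::time::Instant;

struct Entry {
    value: String,
    // Timestamp recorded for the write; must reflect the moment the
    // mutation could actually happen, not an earlier point in time.
    written_at: Instant,
}

fn insert_with_timestamp(
    map: &Arc<Mutex<HashMap<String, Entry>>>,
    key: String,
    value: String,
) -> Instant {
    // Acquire the lock first; only then read the clock. Reading the clock
    // before this point could record a timestamp older than writes made by
    // other threads that held the lock in the meantime.
    let mut guard = map.lock().unwrap();
    let ts = Instant::now();
    guard.insert(key, Entry { value, written_at: ts });
    ts
}

fn main() {
    let map = Arc::new(Mutex::new(HashMap::new()));
    let ts = insert_with_timestamp(&map, "k".into(), "v".into());
    println!("inserted at {ts:?}");
}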