// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "cache/lru_cache.h"

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

#include "cache/secondary_cache_adapter.h"
#include "monitoring/perf_context_imp.h"
#include "monitoring/statistics_impl.h"
#include "port/lang.h"
#include "util/distributed_mutex.h"

namespace ROCKSDB_NAMESPACE {
namespace lru_cache {

LRUHandleTable::LRUHandleTable(int max_upper_hash_bits,
                               MemoryAllocator* allocator)
    : length_bits_(/* historical starting size*/ 4),
      list_(new LRUHandle* [size_t{1} << length_bits_] {}),
      elems_(0),
      max_length_bits_(max_upper_hash_bits),
      allocator_(allocator) {}

LRUHandleTable::~LRUHandleTable() {
  auto alloc = allocator_;
  ApplyToEntriesRange(
      [alloc](LRUHandle* h) {
        if (!h->HasRefs()) {
          h->Free(alloc);
        }
      },
      0, size_t{1} << length_bits_);
}

LRUHandle* LRUHandleTable::Lookup(const Slice& key, uint32_t hash) {
  return *FindPointer(key, hash);
}

LRUHandle* LRUHandleTable::Insert(LRUHandle* h) {
  LRUHandle** ptr = FindPointer(h->key(), h->hash);
  LRUHandle* old = *ptr;
  h->next_hash = (old == nullptr ? nullptr : old->next_hash);
  *ptr = h;
  if (old == nullptr) {
    ++elems_;
    if ((elems_ >> length_bits_) > 0) {  // elems_ >= length
      // Since each cache entry is fairly large, we aim for a small
      // average linked list length (<= 1).
      Resize();
    }
  }
  return old;
}

LRUHandle* LRUHandleTable::Remove(const Slice& key, uint32_t hash) {
  LRUHandle** ptr = FindPointer(key, hash);
  LRUHandle* result = *ptr;
  if (result != nullptr) {
    *ptr = result->next_hash;
    --elems_;
  }
  return result;
}
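
// Returns a pointer to the slot that points to the entry matching key/hash,
// or, if there is no such entry, a pointer to the trailing (null) slot of the
// corresponding bucket's chain. Callers use the returned slot to read,
// replace, or unlink the entry.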
LRUHandle** LRUHandleTable::FindPointer(const Slice& key, uint32_t hash) {
  LRUHandle** ptr = &list_[hash >> (32 - length_bits_)];
  while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
    ptr = &(*ptr)->next_hash;
  }
  return ptr;
}
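
// Doubles the bucket array (up to max_length_bits_) and rehashes every entry
// into the new array. Because bucket indexes come from the upper hash bits,
// each old bucket splits into two adjacent new buckets.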
void LRUHandleTable::Resize() {
  if (length_bits_ >= max_length_bits_) {
    // Due to reaching limit of hash information, if we made the table bigger,
    // we would allocate more addresses but only the same number would be
    // used.
    return;
  }
  if (length_bits_ >= 31) {
    // Avoid undefined behavior shifting uint32_t by 32.
    return;
  }

  uint32_t old_length = uint32_t{1} << length_bits_;
  int new_length_bits = length_bits_ + 1;
  std::unique_ptr<LRUHandle*[]> new_list{
      new LRUHandle* [size_t{1} << new_length_bits] {}};
  [[maybe_unused]] uint32_t count = 0;
  for (uint32_t i = 0; i < old_length; i++) {
    LRUHandle* h = list_[i];
    while (h != nullptr) {
      LRUHandle* next = h->next_hash;
      uint32_t hash = h->hash;
      LRUHandle** ptr = &new_list[hash >> (32 - new_length_bits)];
      h->next_hash = *ptr;
      *ptr = h;
      h = next;
      count++;
    }
  }
  assert(elems_ == count);
  list_ = std::move(new_list);
  length_bits_ = new_length_bits;
}

LRUCacheShard::LRUCacheShard(size_t capacity, bool strict_capacity_limit,
                             double high_pri_pool_ratio,
                             double low_pri_pool_ratio, bool use_adaptive_mutex,
                             CacheMetadataChargePolicy metadata_charge_policy,
                             int max_upper_hash_bits,
                             MemoryAllocator* allocator,
                             const Cache::EvictionCallback* eviction_callback)
    : CacheShardBase(metadata_charge_policy),
      capacity_(0),
      high_pri_pool_usage_(0),
      low_pri_pool_usage_(0),
      strict_capacity_limit_(strict_capacity_limit),
      high_pri_pool_ratio_(high_pri_pool_ratio),
      high_pri_pool_capacity_(0),
      low_pri_pool_ratio_(low_pri_pool_ratio),
      low_pri_pool_capacity_(0),
      table_(max_upper_hash_bits, allocator),
      usage_(0),
      lru_usage_(0),
      mutex_(use_adaptive_mutex),
      eviction_callback_(*eviction_callback) {
  // Make empty circular linked list.
  lru_.next = &lru_;
  lru_.prev = &lru_;
  lru_low_pri_ = &lru_;
  lru_bottom_pri_ = &lru_;
  SetCapacity(capacity);
}

void LRUCacheShard::EraseUnRefEntries() {
  autovector<LRUHandle*> last_reference_list;
  {
    DMutexLock l(mutex_);
    while (lru_.next != &lru_) {
      LRUHandle* old = lru_.next;
      // LRU list contains only elements which can be evicted.
      assert(old->InCache() && !old->HasRefs());
      LRU_Remove(old);
      table_.Remove(old->key(), old->hash);
      old->SetInCache(false);
      assert(usage_ >= old->total_charge);
      usage_ -= old->total_charge;
      last_reference_list.push_back(old);
    }
  }

  for (auto entry : last_reference_list) {
    entry->Free(table_.GetAllocator());
  }
}
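
// Applies `callback` to a bounded slice of the table under the mutex. A
// typical caller scans the whole shard in chunks, releasing the mutex between
// calls. A minimal sketch of that loop (assuming some per-entry callback
// `cb`; not part of this file):
//
//   size_t state = 0;
//   do {
//     shard.ApplyToSomeEntries(cb, /*average_entries_per_lock=*/256, &state);
//   } while (state != SIZE_MAX);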
void LRUCacheShard::ApplyToSomeEntries(
    const std::function<void(const Slice& key, Cache::ObjectPtr value,
                             size_t charge,
                             const Cache::CacheItemHelper* helper)>& callback,
    size_t average_entries_per_lock, size_t* state) {
  // The state is essentially going to be the starting hash, which works
  // nicely even if we resize between calls because we use upper-most
  // hash bits for table indexes.
  DMutexLock l(mutex_);
  int length_bits = table_.GetLengthBits();
  size_t length = size_t{1} << length_bits;

  assert(average_entries_per_lock > 0);
  // Assuming we are called with same average_entries_per_lock repeatedly,
  // this simplifies some logic (index_end will not overflow).
  assert(average_entries_per_lock < length || *state == 0);

  size_t index_begin = *state >> (sizeof(size_t) * 8u - length_bits);
  size_t index_end = index_begin + average_entries_per_lock;
  if (index_end >= length) {
    // Going to end
    index_end = length;
    *state = SIZE_MAX;
  } else {
    *state = index_end << (sizeof(size_t) * 8u - length_bits);
  }

  table_.ApplyToEntriesRange(
      [callback,
       metadata_charge_policy = metadata_charge_policy_](LRUHandle* h) {
        callback(h->key(), h->value, h->GetCharge(metadata_charge_policy),
                 h->helper);
      },
      index_begin, index_end);
}

void LRUCacheShard::TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri,
                                    LRUHandle** lru_bottom_pri) {
  DMutexLock l(mutex_);
  *lru = &lru_;
  *lru_low_pri = lru_low_pri_;
  *lru_bottom_pri = lru_bottom_pri_;
}

size_t LRUCacheShard::TEST_GetLRUSize() {
  DMutexLock l(mutex_);
  LRUHandle* lru_handle = lru_.next;
  size_t lru_size = 0;
  while (lru_handle != &lru_) {
    lru_size++;
    lru_handle = lru_handle->next;
  }
  return lru_size;
}

double LRUCacheShard::GetHighPriPoolRatio() {
  DMutexLock l(mutex_);
  return high_pri_pool_ratio_;
}

double LRUCacheShard::GetLowPriPoolRatio() {
  DMutexLock l(mutex_);
  return low_pri_pool_ratio_;
}

void LRUCacheShard::LRU_Remove(LRUHandle* e) {
  assert(e->next != nullptr);
  assert(e->prev != nullptr);
  if (lru_low_pri_ == e) {
    lru_low_pri_ = e->prev;
  }
  if (lru_bottom_pri_ == e) {
    lru_bottom_pri_ = e->prev;
  }
  e->next->prev = e->prev;
  e->prev->next = e->next;
  e->prev = e->next = nullptr;
  assert(lru_usage_ >= e->total_charge);
  lru_usage_ -= e->total_charge;
  assert(!e->InHighPriPool() || !e->InLowPriPool());
  if (e->InHighPriPool()) {
    assert(high_pri_pool_usage_ >= e->total_charge);
    high_pri_pool_usage_ -= e->total_charge;
  } else if (e->InLowPriPool()) {
    assert(low_pri_pool_usage_ >= e->total_charge);
    low_pri_pool_usage_ -= e->total_charge;
  }
}
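
// Inserts "e" into the LRU list at a position determined by its priority and
// hit history: into the high-pri pool (list head) when that pool is enabled
// and the entry is high priority or has been hit; otherwise into the low-pri
// pool when that pool is enabled and the entry is at least low priority or
// has been hit; otherwise into the bottom-pri pool.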
void LRUCacheShard::LRU_Insert(LRUHandle* e) {
  assert(e->next == nullptr);
  assert(e->prev == nullptr);
  if (high_pri_pool_ratio_ > 0 && (e->IsHighPri() || e->HasHit())) {
    // Insert "e" to the head of LRU list.
    e->next = &lru_;
    e->prev = lru_.prev;
    e->prev->next = e;
    e->next->prev = e;
    e->SetInHighPriPool(true);
    e->SetInLowPriPool(false);
    high_pri_pool_usage_ += e->total_charge;
    MaintainPoolSize();
  } else if (low_pri_pool_ratio_ > 0 &&
             (e->IsHighPri() || e->IsLowPri() || e->HasHit())) {
    // Insert "e" to the head of low-pri pool.
    e->next = lru_low_pri_->next;
    e->prev = lru_low_pri_;
    e->prev->next = e;
    e->next->prev = e;
    e->SetInHighPriPool(false);
    e->SetInLowPriPool(true);
    low_pri_pool_usage_ += e->total_charge;
    lru_low_pri_ = e;
    MaintainPoolSize();
  } else {
    // Insert "e" to the head of bottom-pri pool.
    e->next = lru_bottom_pri_->next;
    e->prev = lru_bottom_pri_;
    e->prev->next = e;
    e->next->prev = e;
    e->SetInHighPriPool(false);
    e->SetInLowPriPool(false);
    // If the low-pri pool is empty, lru_low_pri_ also needs to be updated.
    if (lru_bottom_pri_ == lru_low_pri_) {
      lru_low_pri_ = e;
    }
    lru_bottom_pri_ = e;
  }
  lru_usage_ += e->total_charge;
}
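
// Demotes entries to keep each pool within its capacity: the least recently
// used high-pri entries are moved into the low-pri pool while the high-pri
// pool is over capacity, and the least recently used low-pri entries are
// moved into the bottom-pri pool while the low-pri pool is over capacity.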
void LRUCacheShard::MaintainPoolSize() {
  while (high_pri_pool_usage_ > high_pri_pool_capacity_) {
    // Overflow last entry in high-pri pool to low-pri pool.
    lru_low_pri_ = lru_low_pri_->next;
    assert(lru_low_pri_ != &lru_);
    assert(lru_low_pri_->InHighPriPool());
    lru_low_pri_->SetInHighPriPool(false);
    lru_low_pri_->SetInLowPriPool(true);
    assert(high_pri_pool_usage_ >= lru_low_pri_->total_charge);
    high_pri_pool_usage_ -= lru_low_pri_->total_charge;
    low_pri_pool_usage_ += lru_low_pri_->total_charge;
  }

  while (low_pri_pool_usage_ > low_pri_pool_capacity_) {
    // Overflow last entry in low-pri pool to bottom-pri pool.
    lru_bottom_pri_ = lru_bottom_pri_->next;
    assert(lru_bottom_pri_ != &lru_);
    assert(lru_bottom_pri_->InLowPriPool());
    lru_bottom_pri_->SetInHighPriPool(false);
    lru_bottom_pri_->SetInLowPriPool(false);
    assert(low_pri_pool_usage_ >= lru_bottom_pri_->total_charge);
    low_pri_pool_usage_ -= lru_bottom_pri_->total_charge;
  }
}
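
// Evicts unreferenced entries from the cold end of the LRU list until
// usage_ + charge fits within capacity_ (or the list is empty). Evicted
// handles are appended to `deleted`; the caller frees them outside the
// mutex (see NotifyEvicted).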
void LRUCacheShard::EvictFromLRU(size_t charge,
                                 autovector<LRUHandle*>* deleted) {
  while ((usage_ + charge) > capacity_ && lru_.next != &lru_) {
    LRUHandle* old = lru_.next;
    // LRU list contains only elements which can be evicted.
    assert(old->InCache() && !old->HasRefs());
    LRU_Remove(old);
    table_.Remove(old->key(), old->hash);
    old->SetInCache(false);
    assert(usage_ >= old->total_charge);
    usage_ -= old->total_charge;
    deleted->push_back(old);
  }
}

void LRUCacheShard::NotifyEvicted(
    const autovector<LRUHandle*>& evicted_handles) {
  MemoryAllocator* alloc = table_.GetAllocator();
  for (LRUHandle* entry : evicted_handles) {
    if (eviction_callback_ &&
        eviction_callback_(entry->key(), static_cast<Cache::Handle*>(entry),
                           entry->HasHit())) {
      // Callback took ownership of obj; just free handle
      free(entry);
    } else {
      // Free the entries here outside of mutex for performance reasons.
      entry->Free(alloc);
    }
  }
}

void LRUCacheShard::SetCapacity(size_t capacity) {
  autovector<LRUHandle*> last_reference_list;
  {
    DMutexLock l(mutex_);
    capacity_ = capacity;
    high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
    low_pri_pool_capacity_ = capacity_ * low_pri_pool_ratio_;
    EvictFromLRU(0, &last_reference_list);
  }

  NotifyEvicted(last_reference_list);
}

void LRUCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
  DMutexLock l(mutex_);
  strict_capacity_limit_ = strict_capacity_limit;
}
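
// Inserts a fully constructed handle into the shard, evicting older entries
// as needed. If inserting would exceed capacity and eviction cannot free
// enough space, the entry is either dropped but reported as Status::OK (no
// handle requested; it behaves as if inserted and immediately evicted) or
// rejected with Status::MemoryLimit (handle requested under the strict
// capacity limit).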
Status LRUCacheShard::InsertItem(LRUHandle* e, LRUHandle** handle) {
  Status s = Status::OK();
  autovector<LRUHandle*> last_reference_list;

  {
    DMutexLock l(mutex_);

    // Free the space following strict LRU policy until enough space
    // is freed or the lru list is empty.
    EvictFromLRU(e->total_charge, &last_reference_list);

    if ((usage_ + e->total_charge) > capacity_ &&
        (strict_capacity_limit_ || handle == nullptr)) {
      e->SetInCache(false);
      if (handle == nullptr) {
        // Don't insert the entry but still return ok, as if the entry inserted
        // into cache and get evicted immediately.
        last_reference_list.push_back(e);
      } else {
        free(e);
        e = nullptr;
        *handle = nullptr;
        s = Status::MemoryLimit("Insert failed due to LRU cache being full.");
      }
    } else {
      // Insert into the cache. Note that the cache might get larger than its
      // capacity if not enough space was freed up.
      LRUHandle* old = table_.Insert(e);
      usage_ += e->total_charge;
      if (old != nullptr) {
        s = Status::OkOverwritten();
        assert(old->InCache());
        old->SetInCache(false);
        if (!old->HasRefs()) {
          // old is on LRU because it's in cache and its reference count is 0.
          LRU_Remove(old);
          assert(usage_ >= old->total_charge);
          usage_ -= old->total_charge;
          last_reference_list.push_back(old);
        }
      }
      if (handle == nullptr) {
        LRU_Insert(e);
      } else {
        // If caller already holds a ref, no need to take one here.
        if (!e->HasRefs()) {
          e->Ref();
        }
        *handle = e;
      }
    }
  }

  NotifyEvicted(last_reference_list);

  return s;
}

LRUHandle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash,
                                 const Cache::CacheItemHelper* /*helper*/,
                                 Cache::CreateContext* /*create_context*/,
                                 Cache::Priority /*priority*/,
                                 Statistics* /*stats*/) {
  DMutexLock l(mutex_);
  LRUHandle* e = table_.Lookup(key, hash);
  if (e != nullptr) {
    assert(e->InCache());
    if (!e->HasRefs()) {
      // The entry is in LRU since it's in hash and has no external
      // references.
      LRU_Remove(e);
    }
    e->Ref();
    e->SetHit();
  }
  return e;
}

bool LRUCacheShard::Ref(LRUHandle* e) {
  DMutexLock l(mutex_);
  // To create another reference - entry must be already externally referenced.
  assert(e->HasRefs());
  e->Ref();
  return true;
}

void LRUCacheShard::SetHighPriorityPoolRatio(double high_pri_pool_ratio) {
  DMutexLock l(mutex_);
  high_pri_pool_ratio_ = high_pri_pool_ratio;
  high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
  MaintainPoolSize();
}

void LRUCacheShard::SetLowPriorityPoolRatio(double low_pri_pool_ratio) {
  DMutexLock l(mutex_);
  low_pri_pool_ratio_ = low_pri_pool_ratio;
  low_pri_pool_capacity_ = capacity_ * low_pri_pool_ratio_;
  MaintainPoolSize();
}

bool LRUCacheShard::Release(LRUHandle* e, bool /*useful*/,
                            bool erase_if_last_ref) {
  if (e == nullptr) {
    return false;
  }
  bool must_free;
  bool was_in_cache;
  {
    DMutexLock l(mutex_);
    must_free = e->Unref();
    was_in_cache = e->InCache();
    if (must_free && was_in_cache) {
      // The item is still in cache, and nobody else holds a reference to it.
      if (usage_ > capacity_ || erase_if_last_ref) {
        // The LRU list must be empty since the cache is full.
        assert(lru_.next == &lru_ || erase_if_last_ref);
        // Take this opportunity and remove the item.
        table_.Remove(e->key(), e->hash);
        e->SetInCache(false);
      } else {
        // Put the item back on the LRU list, and don't free it.
        LRU_Insert(e);
        must_free = false;
      }
    }
    // If about to be freed, then decrement the cache usage.
    if (must_free) {
      assert(usage_ >= e->total_charge);
      usage_ -= e->total_charge;
    }
  }

  // Free the entry here outside of mutex for performance reasons.
  if (must_free) {
    // Only call eviction callback if we're sure no one requested erasure
    // FIXME: disabled because of test churn
    if (false && was_in_cache && !erase_if_last_ref && eviction_callback_ &&
        eviction_callback_(e->key(), static_cast<Cache::Handle*>(e),
                           e->HasHit())) {
      // Callback took ownership of obj; just free handle
      free(e);
    } else {
      e->Free(table_.GetAllocator());
    }
  }
  return must_free;
}

LRUHandle* LRUCacheShard::CreateHandle(const Slice& key, uint32_t hash,
                                       Cache::ObjectPtr value,
                                       const Cache::CacheItemHelper* helper,
                                       size_t charge) {
  assert(helper);
  // value == nullptr is reserved for indicating failure in SecondaryCache
  assert(!(helper->IsSecondaryCacheCompatible() && value == nullptr));

  // Allocate the memory here outside of the mutex.
  // If the cache is full, we'll have to release it.
  // It shouldn't happen very often though.
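  // LRUHandle ends in a one-byte key_data array, so we allocate the struct
  // plus key.size() - 1 extra bytes and store the key inline after the fixed
  // fields.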
  LRUHandle* e =
      static_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size()));

  e->value = value;
  e->m_flags = 0;
  e->im_flags = 0;
  e->helper = helper;
  e->key_length = key.size();
  e->hash = hash;
  e->refs = 0;
  e->next = e->prev = nullptr;
  memcpy(e->key_data, key.data(), key.size());
  e->CalcTotalCharge(charge, metadata_charge_policy_);

  return e;
}

Status LRUCacheShard::Insert(const Slice& key, uint32_t hash,
                             Cache::ObjectPtr value,
                             const Cache::CacheItemHelper* helper,
                             size_t charge, LRUHandle** handle,
                             Cache::Priority priority) {
  LRUHandle* e = CreateHandle(key, hash, value, helper, charge);
  e->SetPriority(priority);
  e->SetInCache(true);
  return InsertItem(e, handle);
}

LRUHandle* LRUCacheShard::CreateStandalone(const Slice& key, uint32_t hash,
                                           Cache::ObjectPtr value,
                                           const Cache::CacheItemHelper* helper,
                                           size_t charge,
                                           bool allow_uncharged) {
  LRUHandle* e = CreateHandle(key, hash, value, helper, charge);
  e->SetIsStandalone(true);
  e->Ref();

  autovector<LRUHandle*> last_reference_list;

  {
    DMutexLock l(mutex_);

    EvictFromLRU(e->total_charge, &last_reference_list);

    if (strict_capacity_limit_ && (usage_ + e->total_charge) > capacity_) {
      if (allow_uncharged) {
        e->total_charge = 0;
      } else {
        free(e);
        e = nullptr;
      }
    } else {
      usage_ += e->total_charge;
    }
  }

  NotifyEvicted(last_reference_list);
  return e;
}

void LRUCacheShard::Erase(const Slice& key, uint32_t hash) {
  LRUHandle* e;
  bool last_reference = false;
  {
    DMutexLock l(mutex_);
    e = table_.Remove(key, hash);
    if (e != nullptr) {
      assert(e->InCache());
      e->SetInCache(false);
      if (!e->HasRefs()) {
        // The entry is in LRU since it's in hash and has no external
        // references.
        LRU_Remove(e);
        assert(usage_ >= e->total_charge);
        usage_ -= e->total_charge;
        last_reference = true;
      }
    }
  }

  // Free the entry here outside of mutex for performance reasons.
  // last_reference will only be true if e != nullptr.
  if (last_reference) {
    e->Free(table_.GetAllocator());
  }
}

size_t LRUCacheShard::GetUsage() const {
  DMutexLock l(mutex_);
  return usage_;
}

size_t LRUCacheShard::GetPinnedUsage() const {
  DMutexLock l(mutex_);
  assert(usage_ >= lru_usage_);
  return usage_ - lru_usage_;
}

size_t LRUCacheShard::GetOccupancyCount() const {
  DMutexLock l(mutex_);
  return table_.GetOccupancyCount();
}

size_t LRUCacheShard::GetTableAddressCount() const {
  DMutexLock l(mutex_);
  return size_t{1} << table_.GetLengthBits();
}

void LRUCacheShard::AppendPrintableOptions(std::string& str) const {
  const int kBufferSize = 200;
  char buffer[kBufferSize];
  {
    DMutexLock l(mutex_);
    snprintf(buffer, kBufferSize, " high_pri_pool_ratio: %.3lf\n",
             high_pri_pool_ratio_);
    snprintf(buffer + strlen(buffer), kBufferSize - strlen(buffer),
             " low_pri_pool_ratio: %.3lf\n", low_pri_pool_ratio_);
  }
  str.append(buffer);
}

LRUCache::LRUCache(const LRUCacheOptions& opts) : ShardedCache(opts) {
  size_t per_shard = GetPerShardCapacity();
  MemoryAllocator* alloc = memory_allocator();
  InitShards([&](LRUCacheShard* cs) {
    new (cs) LRUCacheShard(per_shard, opts.strict_capacity_limit,
                           opts.high_pri_pool_ratio, opts.low_pri_pool_ratio,
                           opts.use_adaptive_mutex, opts.metadata_charge_policy,
                           /* max_upper_hash_bits */ 32 - opts.num_shard_bits,
                           alloc, &eviction_callback_);
  });
}

Cache::ObjectPtr LRUCache::Value(Handle* handle) {
  auto h = static_cast<const LRUHandle*>(handle);
  return h->value;
}

size_t LRUCache::GetCharge(Handle* handle) const {
  return static_cast<const LRUHandle*>(handle)->GetCharge(
      GetShard(0).metadata_charge_policy_);
}

const Cache::CacheItemHelper* LRUCache::GetCacheItemHelper(
    Handle* handle) const {
  auto h = static_cast<const LRUHandle*>(handle);
  return h->helper;
}

void LRUCache::ApplyToHandle(
    Cache* cache, Handle* handle,
    const std::function<void(const Slice& key, ObjectPtr value, size_t charge,
                             const CacheItemHelper* helper)>& callback) {
  auto cache_ptr = static_cast<LRUCache*>(cache);
  auto h = static_cast<const LRUHandle*>(handle);
  callback(h->key(), h->value,
           h->GetCharge(cache_ptr->GetShard(0).metadata_charge_policy_),
           h->helper);
}

size_t LRUCache::TEST_GetLRUSize() {
  return SumOverShards([](LRUCacheShard& cs) { return cs.TEST_GetLRUSize(); });
}

double LRUCache::GetHighPriPoolRatio() {
  return GetShard(0).GetHighPriPoolRatio();
}

}  // namespace lru_cache

std::shared_ptr<Cache> LRUCacheOptions::MakeSharedCache() const {
  if (num_shard_bits >= 20) {
    return nullptr;  // The cache cannot be sharded into too many fine pieces.
  }
  if (high_pri_pool_ratio < 0.0 || high_pri_pool_ratio > 1.0) {
    // Invalid high_pri_pool_ratio
    return nullptr;
  }
  if (low_pri_pool_ratio < 0.0 || low_pri_pool_ratio > 1.0) {
    // Invalid low_pri_pool_ratio
    return nullptr;
  }
  if (low_pri_pool_ratio + high_pri_pool_ratio > 1.0) {
    // Invalid high_pri_pool_ratio and low_pri_pool_ratio combination
    return nullptr;
  }
  // For sanitized options
  LRUCacheOptions opts = *this;
  if (opts.num_shard_bits < 0) {
    opts.num_shard_bits = GetDefaultCacheShardBits(capacity);
  }
  std::shared_ptr<Cache> cache = std::make_shared<LRUCache>(opts);
  if (secondary_cache) {
    cache =
        std::make_shared<CacheWithSecondaryAdapter>(cache, secondary_cache);
  }
  return cache;
}

std::shared_ptr<RowCache> LRUCacheOptions::MakeSharedRowCache() const {
  if (secondary_cache) {
    // Not allowed for a RowCache
    return nullptr;
  }
  // Works while RowCache is an alias for Cache
  return MakeSharedCache();
}

}  // namespace ROCKSDB_NAMESPACE