// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "cache/lru_cache.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#include <string>

#include "util/mutexlock.h"

namespace ROCKSDB_NAMESPACE {

LRUHandleTable::LRUHandleTable() : list_(nullptr), length_(0), elems_(0) {
  Resize();
}

LRUHandleTable::~LRUHandleTable() {
  ApplyToAllCacheEntries([](LRUHandle* h) {
    if (!h->HasRefs()) {
      h->Free();
    }
  });
  delete[] list_;
}

LRUHandle* LRUHandleTable::Lookup(const Slice& key, uint32_t hash) {
  return *FindPointer(key, hash);
}
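
// Insert "h" into the table. If an entry with the same key and hash already
// exists, it is unlinked from its bucket chain and returned so that the
// caller can release its charge and free it; otherwise returns nullptr.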
LRUHandle* LRUHandleTable::Insert(LRUHandle* h) {
  LRUHandle** ptr = FindPointer(h->key(), h->hash);
  LRUHandle* old = *ptr;
  h->next_hash = (old == nullptr ? nullptr : old->next_hash);
  *ptr = h;
  if (old == nullptr) {
    ++elems_;
    if (elems_ > length_) {
      // Since each cache entry is fairly large, we aim for a small
      // average linked list length (<= 1).
      Resize();
    }
  }
  return old;
}

LRUHandle* LRUHandleTable::Remove(const Slice& key, uint32_t hash) {
  LRUHandle** ptr = FindPointer(key, hash);
  LRUHandle* result = *ptr;
  if (result != nullptr) {
    *ptr = result->next_hash;
    --elems_;
  }
  return result;
}
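
// Return a pointer to the slot that points to a cache entry matching the key
// and hash, or, if there is no such entry, a pointer to the trailing nullptr
// slot of the corresponding bucket chain. Writing through the returned
// pointer is what lets Insert() and Remove() splice entries in and out
// without re-walking the chain.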
LRUHandle** LRUHandleTable::FindPointer(const Slice& key, uint32_t hash) {
  LRUHandle** ptr = &list_[hash & (length_ - 1)];
  while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
    ptr = &(*ptr)->next_hash;
  }
  return ptr;
}
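
// Grow the bucket array to the smallest power of two that is at least
// 1.5 * elems_ (starting at 16) and rehash every entry into it. Keeping the
// length a power of two is what makes the "hash & (length_ - 1)" bucket
// computation in FindPointer() valid.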
void LRUHandleTable::Resize() {
  uint32_t new_length = 16;
  while (new_length < elems_ * 1.5) {
    new_length *= 2;
  }
  LRUHandle** new_list = new LRUHandle*[new_length];
  memset(new_list, 0, sizeof(new_list[0]) * new_length);
  uint32_t count = 0;
  for (uint32_t i = 0; i < length_; i++) {
    LRUHandle* h = list_[i];
    while (h != nullptr) {
      LRUHandle* next = h->next_hash;
      uint32_t hash = h->hash;
      LRUHandle** ptr = &new_list[hash & (new_length - 1)];
      h->next_hash = *ptr;
      *ptr = h;
      h = next;
      count++;
    }
  }
  assert(elems_ == count);
  delete[] list_;
  list_ = new_list;
  length_ = new_length;
}

LRUCacheShard::LRUCacheShard(size_t capacity, bool strict_capacity_limit,
                             double high_pri_pool_ratio,
                             bool use_adaptive_mutex,
                             CacheMetadataChargePolicy metadata_charge_policy)
    : capacity_(0),
      high_pri_pool_usage_(0),
      strict_capacity_limit_(strict_capacity_limit),
      high_pri_pool_ratio_(high_pri_pool_ratio),
      high_pri_pool_capacity_(0),
      usage_(0),
      lru_usage_(0),
      mutex_(use_adaptive_mutex) {
  set_metadata_charge_policy(metadata_charge_policy);
  // Make empty circular linked list
  lru_.next = &lru_;
  lru_.prev = &lru_;
  lru_low_pri_ = &lru_;
  SetCapacity(capacity);
}
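
// Remove and free every entry that is in the cache but not externally
// referenced -- which is exactly the set of entries sitting on the LRU list.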
void LRUCacheShard::EraseUnRefEntries() {
  autovector<LRUHandle*> last_reference_list;
  {
    MutexLock l(&mutex_);
    while (lru_.next != &lru_) {
      LRUHandle* old = lru_.next;
      // LRU list contains only elements which can be evicted
      assert(old->InCache() && !old->HasRefs());
      LRU_Remove(old);
      table_.Remove(old->key(), old->hash);
      old->SetInCache(false);
      size_t total_charge = old->CalcTotalCharge(metadata_charge_policy_);
      assert(usage_ >= total_charge);
      usage_ -= total_charge;
      last_reference_list.push_back(old);
    }
  }

  for (auto entry : last_reference_list) {
    entry->Free();
  }
}

void LRUCacheShard::ApplyToAllCacheEntries(void (*callback)(void*, size_t),
                                           bool thread_safe) {
  const auto applyCallback = [&]() {
    table_.ApplyToAllCacheEntries(
        [callback](LRUHandle* h) { callback(h->value, h->charge); });
  };

  if (thread_safe) {
    MutexLock l(&mutex_);
    applyCallback();
  } else {
    applyCallback();
  }
}

void LRUCacheShard::TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri) {
  MutexLock l(&mutex_);
  *lru = &lru_;
  *lru_low_pri = lru_low_pri_;
}

size_t LRUCacheShard::TEST_GetLRUSize() {
  MutexLock l(&mutex_);
  LRUHandle* lru_handle = lru_.next;
  size_t lru_size = 0;
  while (lru_handle != &lru_) {
    lru_size++;
    lru_handle = lru_handle->next;
  }
  return lru_size;
}

double LRUCacheShard::GetHighPriPoolRatio() {
  MutexLock l(&mutex_);
  return high_pri_pool_ratio_;
}

void LRUCacheShard::LRU_Remove(LRUHandle* e) {
  assert(e->next != nullptr);
  assert(e->prev != nullptr);
  if (lru_low_pri_ == e) {
    lru_low_pri_ = e->prev;
  }
  e->next->prev = e->prev;
  e->prev->next = e->next;
  e->prev = e->next = nullptr;
  size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
  assert(lru_usage_ >= total_charge);
  lru_usage_ -= total_charge;
  if (e->InHighPriPool()) {
    assert(high_pri_pool_usage_ >= total_charge);
    high_pri_pool_usage_ -= total_charge;
  }
}
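
// The LRU list is circular with lru_ as a dummy head: lru_.prev is the
// most-recently-used end and lru_.next is the eviction end. lru_low_pri_
// marks the boundary between the low-pri segment and the high-pri pool
// (which sits toward lru_.prev), so high-pri entries are linked in at the
// very head while low-pri entries are linked in just after lru_low_pri_.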
void LRUCacheShard::LRU_Insert(LRUHandle* e) {
  assert(e->next == nullptr);
  assert(e->prev == nullptr);
  size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
  if (high_pri_pool_ratio_ > 0 && (e->IsHighPri() || e->HasHit())) {
    // Insert "e" to head of LRU list.
    e->next = &lru_;
    e->prev = lru_.prev;
    e->prev->next = e;
    e->next->prev = e;
    e->SetInHighPriPool(true);
    high_pri_pool_usage_ += total_charge;
    MaintainPoolSize();
  } else {
    // Insert "e" to the head of low-pri pool. Note that when
    // high_pri_pool_ratio is 0, head of low-pri pool is also head of LRU list.
    e->next = lru_low_pri_->next;
    e->prev = lru_low_pri_;
    e->prev->next = e;
    e->next->prev = e;
    e->SetInHighPriPool(false);
    lru_low_pri_ = e;
  }
  lru_usage_ += total_charge;
}
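
// If the high-pri pool has grown past its capacity, demote entries from its
// LRU end by advancing the lru_low_pri_ boundary toward the head. No list
// links change; only the boundary pointer and the per-entry pool flag do.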
void LRUCacheShard::MaintainPoolSize() {
  while (high_pri_pool_usage_ > high_pri_pool_capacity_) {
    // Overflow last entry in high-pri pool to low-pri pool.
    lru_low_pri_ = lru_low_pri_->next;
    assert(lru_low_pri_ != &lru_);
    lru_low_pri_->SetInHighPriPool(false);
    size_t total_charge =
        lru_low_pri_->CalcTotalCharge(metadata_charge_policy_);
    assert(high_pri_pool_usage_ >= total_charge);
    high_pri_pool_usage_ -= total_charge;
  }
}
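
// Evict from the LRU end until the shard can absorb an additional "charge"
// bytes or the list runs out of evictable entries. Evicted handles are
// appended to "deleted" so the caller can Free() them after releasing the
// mutex.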
void LRUCacheShard::EvictFromLRU(size_t charge,
                                 autovector<LRUHandle*>* deleted) {
  while ((usage_ + charge) > capacity_ && lru_.next != &lru_) {
    LRUHandle* old = lru_.next;
    // LRU list contains only elements which can be evicted
    assert(old->InCache() && !old->HasRefs());
    LRU_Remove(old);
    table_.Remove(old->key(), old->hash);
    old->SetInCache(false);
    size_t old_total_charge = old->CalcTotalCharge(metadata_charge_policy_);
    assert(usage_ >= old_total_charge);
    usage_ -= old_total_charge;
    deleted->push_back(old);
  }
}

void LRUCacheShard::SetCapacity(size_t capacity) {
  autovector<LRUHandle*> last_reference_list;
  {
    MutexLock l(&mutex_);
    capacity_ = capacity;
    high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
    EvictFromLRU(0, &last_reference_list);
  }

  // Free the entries outside of mutex for performance reasons
  for (auto entry : last_reference_list) {
    entry->Free();
  }
}

void LRUCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
  MutexLock l(&mutex_);
  strict_capacity_limit_ = strict_capacity_limit;
}
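
// On a hit, the entry is taken off the LRU list (if it had no external
// references) and its reference count is bumped; only unreferenced entries
// live on the LRU list, so a looked-up entry cannot be evicted until it is
// released.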
Cache::Handle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash) {
  MutexLock l(&mutex_);
  LRUHandle* e = table_.Lookup(key, hash);
  if (e != nullptr) {
    assert(e->InCache());
    if (!e->HasRefs()) {
      // The entry is in LRU since it's in hash and has no external references
      LRU_Remove(e);
    }
    e->Ref();
    e->SetHit();
  }
  return reinterpret_cast<Cache::Handle*>(e);
}

bool LRUCacheShard::Ref(Cache::Handle* h) {
  LRUHandle* e = reinterpret_cast<LRUHandle*>(h);
  MutexLock l(&mutex_);
  // To create another reference - entry must be already externally referenced
  assert(e->HasRefs());
  e->Ref();
  return true;
}

void LRUCacheShard::SetHighPriorityPoolRatio(double high_pri_pool_ratio) {
  MutexLock l(&mutex_);
  high_pri_pool_ratio_ = high_pri_pool_ratio;
  high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
  MaintainPoolSize();
}
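
// Drop one reference. If it was the last one, the entry is either put back
// on the LRU list (the common case), or removed and freed when force_erase
// is set or the shard is over capacity. Returns true iff the entry was freed.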
bool LRUCacheShard::Release(Cache::Handle* handle, bool force_erase) {
  if (handle == nullptr) {
    return false;
  }
  LRUHandle* e = reinterpret_cast<LRUHandle*>(handle);
  bool last_reference = false;
  {
    MutexLock l(&mutex_);
    last_reference = e->Unref();
    if (last_reference && e->InCache()) {
      // The item is still in cache, and nobody else holds a reference to it
      if (usage_ > capacity_ || force_erase) {
        // The LRU list must be empty since the cache is full
        assert(lru_.next == &lru_ || force_erase);
        // Take this opportunity and remove the item
        table_.Remove(e->key(), e->hash);
        e->SetInCache(false);
      } else {
        // Put the item back on the LRU list, and don't free it
        LRU_Insert(e);
        last_reference = false;
      }
    }
    if (last_reference) {
      size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
      assert(usage_ >= total_charge);
      usage_ -= total_charge;
    }
  }

  // Free the entry here outside of mutex for performance reasons
  if (last_reference) {
    e->Free();
  }
  return last_reference;
}
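
// LRUHandle ends in a one-byte flexible key array (key_data), so a handle
// and its key share a single allocation of sizeof(LRUHandle) - 1 + key.size()
// bytes, with the key bytes copied into place right after the struct fields.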
Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
                             size_t charge,
                             void (*deleter)(const Slice& key, void* value),
                             Cache::Handle** handle, Cache::Priority priority) {
  // Allocate the memory here outside of the mutex
  // If the cache is full, we'll have to release it
  // It shouldn't happen very often though.
  LRUHandle* e = reinterpret_cast<LRUHandle*>(
      new char[sizeof(LRUHandle) - 1 + key.size()]);
  Status s = Status::OK();
  autovector<LRUHandle*> last_reference_list;

  e->value = value;
  e->deleter = deleter;
  e->charge = charge;
  e->key_length = key.size();
  e->flags = 0;
  e->hash = hash;
  e->refs = 0;
  e->next = e->prev = nullptr;
  e->SetInCache(true);
  e->SetPriority(priority);
  memcpy(e->key_data, key.data(), key.size());
  size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);

  {
    MutexLock l(&mutex_);
    // Free the space following strict LRU policy until enough space
    // is freed or the lru list is empty
    EvictFromLRU(total_charge, &last_reference_list);

    if ((usage_ + total_charge) > capacity_ &&
        (strict_capacity_limit_ || handle == nullptr)) {
      if (handle == nullptr) {
        // Don't insert the entry but still return ok, as if the entry inserted
        // into cache and get evicted immediately.
        e->SetInCache(false);
        last_reference_list.push_back(e);
      } else {
        delete[] reinterpret_cast<char*>(e);
        *handle = nullptr;
        s = Status::Incomplete("Insert failed due to LRU cache being full.");
      }
    } else {
      // Insert into the cache. Note that the cache might get larger than its
      // capacity if not enough space was freed up.
      LRUHandle* old = table_.Insert(e);
      usage_ += total_charge;
      if (old != nullptr) {
        assert(old->InCache());
        old->SetInCache(false);
        if (!old->HasRefs()) {
          // old is on LRU because it's in cache and its reference count is 0
          LRU_Remove(old);
          size_t old_total_charge =
              old->CalcTotalCharge(metadata_charge_policy_);
          assert(usage_ >= old_total_charge);
          usage_ -= old_total_charge;
          last_reference_list.push_back(old);
        }
      }
      if (handle == nullptr) {
        LRU_Insert(e);
      } else {
        e->Ref();
        *handle = reinterpret_cast<Cache::Handle*>(e);
      }
    }
  }

  // Free the entries here outside of mutex for performance reasons
  for (auto entry : last_reference_list) {
    entry->Free();
  }

  return s;
}

void LRUCacheShard::Erase(const Slice& key, uint32_t hash) {
  LRUHandle* e;
  bool last_reference = false;
  {
    MutexLock l(&mutex_);
    e = table_.Remove(key, hash);
    if (e != nullptr) {
      assert(e->InCache());
      e->SetInCache(false);
      if (!e->HasRefs()) {
        // The entry is in LRU since it's in hash and has no external
        // references
        LRU_Remove(e);
        size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
        assert(usage_ >= total_charge);
        usage_ -= total_charge;
        last_reference = true;
      }
    }
  }

  // Free the entry here outside of mutex for performance reasons
  // last_reference will only be true if e != nullptr
  if (last_reference) {
    e->Free();
  }
}

size_t LRUCacheShard::GetUsage() const {
  MutexLock l(&mutex_);
  return usage_;
}
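
// Pinned usage is the portion of the charge that cannot be evicted: total
// usage minus the charge of the entries currently on the LRU list.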
size_t LRUCacheShard::GetPinnedUsage() const {
  MutexLock l(&mutex_);
  assert(usage_ >= lru_usage_);
  return usage_ - lru_usage_;
}

std::string LRUCacheShard::GetPrintableOptions() const {
  const int kBufferSize = 200;
  char buffer[kBufferSize];
  {
    MutexLock l(&mutex_);
    snprintf(buffer, kBufferSize, "    high_pri_pool_ratio: %.3lf\n",
             high_pri_pool_ratio_);
  }
  return std::string(buffer);
}
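
// LRUCache spreads entries over 2^num_shard_bits shards, each with its own
// mutex and an even slice of the capacity. The shards are placement-new'd
// into a single cacheline-aligned allocation to avoid false sharing.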
LRUCache::LRUCache(size_t capacity, int num_shard_bits,
                   bool strict_capacity_limit, double high_pri_pool_ratio,
                   std::shared_ptr<MemoryAllocator> allocator,
                   bool use_adaptive_mutex,
                   CacheMetadataChargePolicy metadata_charge_policy)
    : ShardedCache(capacity, num_shard_bits, strict_capacity_limit,
                   std::move(allocator)) {
  num_shards_ = 1 << num_shard_bits;
  shards_ = reinterpret_cast<LRUCacheShard*>(
      port::cacheline_aligned_alloc(sizeof(LRUCacheShard) * num_shards_));
  size_t per_shard = (capacity + (num_shards_ - 1)) / num_shards_;
  for (int i = 0; i < num_shards_; i++) {
    new (&shards_[i])
        LRUCacheShard(per_shard, strict_capacity_limit, high_pri_pool_ratio,
                      use_adaptive_mutex, metadata_charge_policy);
  }
}

LRUCache::~LRUCache() {
  if (shards_ != nullptr) {
    assert(num_shards_ > 0);
    for (int i = 0; i < num_shards_; i++) {
      shards_[i].~LRUCacheShard();
    }
    port::cacheline_aligned_free(shards_);
  }
}

CacheShard* LRUCache::GetShard(int shard) {
  return reinterpret_cast<CacheShard*>(&shards_[shard]);
}

const CacheShard* LRUCache::GetShard(int shard) const {
  return reinterpret_cast<CacheShard*>(&shards_[shard]);
}

void* LRUCache::Value(Handle* handle) {
  return reinterpret_cast<const LRUHandle*>(handle)->value;
}

size_t LRUCache::GetCharge(Handle* handle) const {
  return reinterpret_cast<const LRUHandle*>(handle)->charge;
}

uint32_t LRUCache::GetHash(Handle* handle) const {
  return reinterpret_cast<const LRUHandle*>(handle)->hash;
}

void LRUCache::DisownData() {
// Do not drop data if compile with ASAN to suppress leak warning.
#if defined(__clang__)
#if !defined(__has_feature) || !__has_feature(address_sanitizer)
  shards_ = nullptr;
  num_shards_ = 0;
#endif
#else  // __clang__
#ifndef __SANITIZE_ADDRESS__
  shards_ = nullptr;
  num_shards_ = 0;
#endif  // !__SANITIZE_ADDRESS__
#endif  // __clang__
}

size_t LRUCache::TEST_GetLRUSize() {
  size_t lru_size_of_all_shards = 0;
  for (int i = 0; i < num_shards_; i++) {
    lru_size_of_all_shards += shards_[i].TEST_GetLRUSize();
  }
  return lru_size_of_all_shards;
}

double LRUCache::GetHighPriPoolRatio() {
  double result = 0.0;
  if (num_shards_ > 0) {
    result = shards_[0].GetHighPriPoolRatio();
  }
  return result;
}

std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts) {
  return NewLRUCache(cache_opts.capacity, cache_opts.num_shard_bits,
                     cache_opts.strict_capacity_limit,
                     cache_opts.high_pri_pool_ratio,
                     cache_opts.memory_allocator, cache_opts.use_adaptive_mutex,
                     cache_opts.metadata_charge_policy);
}

std::shared_ptr<Cache> NewLRUCache(
    size_t capacity, int num_shard_bits, bool strict_capacity_limit,
    double high_pri_pool_ratio,
    std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
    CacheMetadataChargePolicy metadata_charge_policy) {
  if (num_shard_bits >= 20) {
    return nullptr;  // the cache cannot be sharded into too many fine pieces
  }
  if (high_pri_pool_ratio < 0.0 || high_pri_pool_ratio > 1.0) {
    // invalid high_pri_pool_ratio
    return nullptr;
  }
  if (num_shard_bits < 0) {
    num_shard_bits = GetDefaultCacheShardBits(capacity);
  }
  return std::make_shared<LRUCache>(
      capacity, num_shard_bits, strict_capacity_limit, high_pri_pool_ratio,
      std::move(memory_allocator), use_adaptive_mutex, metadata_charge_policy);
}
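
// A minimal usage sketch (values are illustrative only, not recommendations):
//
//   LRUCacheOptions opts;
//   opts.capacity = 64 << 20;        // 64 MiB total across all shards
//   opts.num_shard_bits = 4;         // 16 shards
//   opts.high_pri_pool_ratio = 0.5;  // half the capacity for high-pri entries
//   std::shared_ptr<Cache> cache = NewLRUCache(opts);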

}  // namespace ROCKSDB_NAMESPACE