// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "rocksdb/cache.h"

#include <algorithm>
#include <forward_list>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

#include "cache/clock_cache.h"
#include "cache/lru_cache.h"
#include "test_util/testharness.h"
#include "util/coding.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

// Conversions between numeric keys/values and the types expected by Cache.
static std::string EncodeKey(int k) {
  std::string result;
  PutFixed32(&result, k);
  return result;
}
static int DecodeKey(const Slice& k) {
  assert(k.size() == 4);
  return DecodeFixed32(k.data());
}
static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
static int DecodeValue(void* v) {
  return static_cast<int>(reinterpret_cast<uintptr_t>(v));
}
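
// Cache type names used to parameterize the tests.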
const std::string kLRU = "lru";
const std::string kClock = "clock";

void dumbDeleter(const Slice& /*key*/, void* /*value*/) {}
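
// A deleter whose value is the cache itself: deleting the entry erases the
// key "foo", re-entering the cache. Used by EraseFromDeleter to check that a
// deleter may safely call back into the cache.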
void eraseDeleter(const Slice& /*key*/, void* value) {
  Cache* cache = reinterpret_cast<Cache*>(value);
  cache->Erase("foo");
}

class CacheTest : public testing::TestWithParam<std::string> {
 public:
  static CacheTest* current_;
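
  // Record every deletion in the current fixture so tests can assert exactly
  // which entries were destroyed and with which values.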
  static void Deleter(const Slice& key, void* v) {
    current_->deleted_keys_.push_back(DecodeKey(key));
    current_->deleted_values_.push_back(DecodeValue(v));
  }

  static const int kCacheSize = 1000;
  static const int kNumShardBits = 4;

  static const int kCacheSize2 = 100;
  static const int kNumShardBits2 = 2;

  std::vector<int> deleted_keys_;
  std::vector<int> deleted_values_;
  std::shared_ptr<Cache> cache_;
  std::shared_ptr<Cache> cache2_;

  CacheTest()
      : cache_(NewCache(kCacheSize, kNumShardBits, false)),
        cache2_(NewCache(kCacheSize2, kNumShardBits2, false)) {
    current_ = this;
  }

  ~CacheTest() override {}

  std::shared_ptr<Cache> NewCache(size_t capacity) {
    auto type = GetParam();
    if (type == kLRU) {
      return NewLRUCache(capacity);
    }
    if (type == kClock) {
      return NewClockCache(capacity);
    }
    return nullptr;
  }

  std::shared_ptr<Cache> NewCache(
      size_t capacity, int num_shard_bits, bool strict_capacity_limit,
      CacheMetadataChargePolicy charge_policy = kDontChargeCacheMetadata) {
    auto type = GetParam();
    if (type == kLRU) {
      LRUCacheOptions co;
      co.capacity = capacity;
      co.num_shard_bits = num_shard_bits;
      co.strict_capacity_limit = strict_capacity_limit;
      co.high_pri_pool_ratio = 0;
      co.metadata_charge_policy = charge_policy;
      return NewLRUCache(co);
    }
    if (type == kClock) {
      return NewClockCache(capacity, num_shard_bits, strict_capacity_limit,
                           charge_policy);
    }
    return nullptr;
  }
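
  // Convenience wrapper: returns the decoded value for `key`, or -1 on a
  // cache miss (test values are always non-negative).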
  int Lookup(std::shared_ptr<Cache> cache, int key) {
    Cache::Handle* handle = cache->Lookup(EncodeKey(key));
    const int r = (handle == nullptr) ? -1 : DecodeValue(cache->Value(handle));
    if (handle != nullptr) {
      cache->Release(handle);
    }
    return r;
  }

  void Insert(std::shared_ptr<Cache> cache, int key, int value,
              int charge = 1) {
    cache->Insert(EncodeKey(key), EncodeValue(value), charge,
                  &CacheTest::Deleter);
  }

  void Erase(std::shared_ptr<Cache> cache, int key) {
    cache->Erase(EncodeKey(key));
  }

  int Lookup(int key) { return Lookup(cache_, key); }

  void Insert(int key, int value, int charge = 1) {
    Insert(cache_, key, value, charge);
  }

  void Erase(int key) { Erase(cache_, key); }

  int Lookup2(int key) { return Lookup(cache2_, key); }

  void Insert2(int key, int value, int charge = 1) {
    Insert(cache2_, key, value, charge);
  }

  void Erase2(int key) { Erase(cache2_, key); }
};

CacheTest* CacheTest::current_;
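
// Tests that only apply to the LRU cache; instantiated with kLRU alone at
// the bottom of the file.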
class LRUCacheTest : public CacheTest {};

TEST_P(CacheTest, UsageTest) {
  // cache is std::shared_ptr and will be automatically cleaned up.
  const uint64_t kCapacity = 100000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
  auto precise_cache = NewCache(kCapacity, 0, false, kFullChargeCacheMetadata);
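  // The precise cache additionally charges per-entry metadata, so for the
  // same data its reported usage is strictly larger.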
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(0, precise_cache->GetUsage());

  size_t usage = 0;
  char value[10] = "abcdef";
  // make sure everything will be cached
  for (int i = 1; i < 100; ++i) {
    std::string key(i, 'a');
    auto kv_size = key.size() + 5;
    cache->Insert(key, reinterpret_cast<void*>(value), kv_size, dumbDeleter);
    precise_cache->Insert(key, reinterpret_cast<void*>(value), kv_size,
                          dumbDeleter);
    usage += kv_size;
    ASSERT_EQ(usage, cache->GetUsage());
    ASSERT_LT(usage, precise_cache->GetUsage());
  }

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(0, precise_cache->GetUsage());

  // make sure the cache will be overloaded
  for (uint64_t i = 1; i < kCapacity; ++i) {
    auto key = ToString(i);
    cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                  dumbDeleter);
    precise_cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                          dumbDeleter);
  }

  // the usage should be close to the capacity
  ASSERT_GT(kCapacity, cache->GetUsage());
  ASSERT_GT(kCapacity, precise_cache->GetUsage());
  ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
  ASSERT_LT(kCapacity * 0.95, precise_cache->GetUsage());
}

TEST_P(CacheTest, PinnedUsageTest) {
  // cache is std::shared_ptr and will be automatically cleaned up.
  const uint64_t kCapacity = 200000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
  auto precise_cache = NewCache(kCapacity, 8, false, kFullChargeCacheMetadata);
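  // Pinned usage counts only entries with outstanding handles; total usage
  // counts everything resident in the cache.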
  size_t pinned_usage = 0;
  char value[10] = "abcdef";

  std::forward_list<Cache::Handle*> unreleased_handles;
  std::forward_list<Cache::Handle*> unreleased_handles_in_precise_cache;

  // Add entries. Unpin some of them after insertion. Then, pin some of them
  // again. Check GetPinnedUsage().
  for (int i = 1; i < 100; ++i) {
    std::string key(i, 'a');
    auto kv_size = key.size() + 5;
    Cache::Handle* handle;
    Cache::Handle* handle_in_precise_cache;
    cache->Insert(key, reinterpret_cast<void*>(value), kv_size, dumbDeleter,
                  &handle);
    assert(handle);
    precise_cache->Insert(key, reinterpret_cast<void*>(value), kv_size,
                          dumbDeleter, &handle_in_precise_cache);
    assert(handle_in_precise_cache);
    pinned_usage += kv_size;
    ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
    ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    if (i % 2 == 0) {
      cache->Release(handle);
      precise_cache->Release(handle_in_precise_cache);
      pinned_usage -= kv_size;
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
      ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    } else {
      unreleased_handles.push_front(handle);
      unreleased_handles_in_precise_cache.push_front(handle_in_precise_cache);
    }
    if (i % 3 == 0) {
      unreleased_handles.push_front(cache->Lookup(key));
      auto x = precise_cache->Lookup(key);
      assert(x);
      unreleased_handles_in_precise_cache.push_front(x);
      // If i % 2 == 0, then the entry was unpinned before the Lookup, so
      // pinned usage increased.
      if (i % 2 == 0) {
        pinned_usage += kv_size;
      }
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
      ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    }
  }

  auto precise_cache_pinned_usage = precise_cache->GetPinnedUsage();
  ASSERT_LT(pinned_usage, precise_cache_pinned_usage);

  // check that overloading the cache does not change the pinned usage
  for (uint64_t i = 1; i < 2 * kCapacity; ++i) {
    auto key = ToString(i);
    cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                  dumbDeleter);
    precise_cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                          dumbDeleter);
  }
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
  ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
  ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());

  // release handles for pinned entries to prevent memory leaks
  for (auto handle : unreleased_handles) {
    cache->Release(handle);
  }
  for (auto handle : unreleased_handles_in_precise_cache) {
    precise_cache->Release(handle);
  }
  ASSERT_EQ(0, cache->GetPinnedUsage());
  ASSERT_EQ(0, precise_cache->GetPinnedUsage());

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(0, precise_cache->GetUsage());
}

TEST_P(CacheTest, HitAndMiss) {
  ASSERT_EQ(-1, Lookup(100));

  Insert(100, 101);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(200, 201);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(100, 102);
  ASSERT_EQ(102, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
}

TEST_P(CacheTest, InsertSameKey) {
  Insert(1, 1);
  Insert(1, 2);
  ASSERT_EQ(2, Lookup(1));
}

TEST_P(CacheTest, Erase) {
  Erase(200);
  ASSERT_EQ(0U, deleted_keys_.size());

  Insert(100, 101);
  Insert(200, 201);
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
}

TEST_P(CacheTest, EntriesArePinned) {
  Insert(100, 101);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1U, cache_->GetUsage());

  Insert(100, 102);
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
  ASSERT_EQ(0U, deleted_keys_.size());
  ASSERT_EQ(2U, cache_->GetUsage());

  cache_->Release(h1);
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
  ASSERT_EQ(1U, cache_->GetUsage());

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(1U, cache_->GetUsage());

  cache_->Release(h2);
  ASSERT_EQ(2U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[1]);
  ASSERT_EQ(102, deleted_values_[1]);
  ASSERT_EQ(0U, cache_->GetUsage());
}

TEST_P(CacheTest, EvictionPolicy) {
  Insert(100, 101);
  Insert(200, 201);
  // Frequently used entry must be kept around
  for (int i = 0; i < kCacheSize * 2; i++) {
    Insert(1000 + i, 2000 + i);
    ASSERT_EQ(101, Lookup(100));
  }
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
}

TEST_P(CacheTest, ExternalRefPinsEntries) {
  Insert(100, 101);
  Cache::Handle* h = cache_->Lookup(EncodeKey(100));
  ASSERT_TRUE(cache_->Ref(h));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h)));
  ASSERT_EQ(1U, cache_->GetUsage());

  for (int i = 0; i < 3; ++i) {
    if (i > 0) {
      // First release (i == 1) corresponds to Ref(), second release (i == 2)
      // corresponds to Lookup(). Then, since all external refs are released,
      // the insertions below should push out the cache entry.
      cache_->Release(h);
    }
    // Double the cache size because the usage bit in the block cache prevents
    // 100 from being evicted in the first kCacheSize iterations.
    for (int j = 0; j < 2 * kCacheSize + 100; j++) {
      Insert(1000 + j, 2000 + j);
    }
    if (i < 2) {
      ASSERT_EQ(101, Lookup(100));
    }
  }
  ASSERT_EQ(-1, Lookup(100));
}

TEST_P(CacheTest, EvictionPolicyRef) {
  Insert(100, 101);
  Insert(101, 102);
  Insert(102, 103);
  Insert(103, 104);
  Insert(200, 101);
  Insert(201, 102);
  Insert(202, 103);
  Insert(203, 104);
  Cache::Handle* h201 = cache_->Lookup(EncodeKey(200));
  Cache::Handle* h202 = cache_->Lookup(EncodeKey(201));
  Cache::Handle* h203 = cache_->Lookup(EncodeKey(202));
  Cache::Handle* h204 = cache_->Lookup(EncodeKey(203));
  Insert(300, 101);
  Insert(301, 102);
  Insert(302, 103);
  Insert(303, 104);

  // Insert far more entries than the cache capacity.
  for (int i = 0; i < kCacheSize * 2; i++) {
    Insert(1000 + i, 2000 + i);
  }

  // Check whether the entries inserted at the beginning are evicted: those
  // without an extra ref are evicted, and those with one are not.
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(-1, Lookup(101));
  ASSERT_EQ(-1, Lookup(102));
  ASSERT_EQ(-1, Lookup(103));

  ASSERT_EQ(-1, Lookup(300));
  ASSERT_EQ(-1, Lookup(301));
  ASSERT_EQ(-1, Lookup(302));
  ASSERT_EQ(-1, Lookup(303));

  ASSERT_EQ(101, Lookup(200));
  ASSERT_EQ(102, Lookup(201));
  ASSERT_EQ(103, Lookup(202));
  ASSERT_EQ(104, Lookup(203));

  // Clean up all the handles.
  cache_->Release(h201);
  cache_->Release(h202);
  cache_->Release(h203);
  cache_->Release(h204);
}

TEST_P(CacheTest, EvictEmptyCache) {
  // Insert an item larger than the capacity to trigger eviction on an empty
  // cache.
  auto cache = NewCache(1, 0, false);
  ASSERT_OK(cache->Insert("foo", nullptr, 10, dumbDeleter));
}

TEST_P(CacheTest, EraseFromDeleter) {
  // Use a deleter that erases another item from the cache, re-entering the
  // cache at that point.
  std::shared_ptr<Cache> cache = NewCache(10, 0, false);
  ASSERT_OK(cache->Insert("foo", nullptr, 1, dumbDeleter));
  ASSERT_OK(cache->Insert("bar", cache.get(), 1, eraseDeleter));
  cache->Erase("bar");
  ASSERT_EQ(nullptr, cache->Lookup("foo"));
  ASSERT_EQ(nullptr, cache->Lookup("bar"));
}

TEST_P(CacheTest, ErasedHandleState) {
  // Insert a key and get two handles.
  Insert(100, 1000);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(h1, h2);
  ASSERT_EQ(DecodeValue(cache_->Value(h1)), 1000);
  ASSERT_EQ(DecodeValue(cache_->Value(h2)), 1000);

  // Delete the key from the cache.
  Erase(100);
  // The key can no longer be found in the cache.
  ASSERT_EQ(-1, Lookup(100));

  // Release one handle.
  cache_->Release(h1);
  // The key still cannot be found in the cache.
  ASSERT_EQ(-1, Lookup(100));

  cache_->Release(h2);
}

TEST_P(CacheTest, HeavyEntries) {
  // Add a bunch of light and heavy entries and then count the combined
  // size of items still in the cache, which must be approximately the
  // same as the total capacity.
  const int kLight = 1;
  const int kHeavy = 10;
  int added = 0;
  int index = 0;
  while (added < 2 * kCacheSize) {
    const int weight = (index & 1) ? kLight : kHeavy;
    Insert(index, 1000 + index, weight);
    added += weight;
    index++;
  }

  int cached_weight = 0;
  for (int i = 0; i < index; i++) {
    const int weight = (i & 1) ? kLight : kHeavy;
    int r = Lookup(i);
    if (r >= 0) {
      cached_weight += weight;
      ASSERT_EQ(1000 + i, r);
    }
  }
  ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
}

TEST_P(CacheTest, NewId) {
  uint64_t a = cache_->NewId();
  uint64_t b = cache_->NewId();
  ASSERT_NE(a, b);
}
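
// Heap-allocated values for the tests below, freed by the deleter.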
class Value {
 public:
  explicit Value(size_t v) : v_(v) {}

  size_t v_;
};

namespace {
void deleter(const Slice& /*key*/, void* value) {
  delete static_cast<Value*>(value);
}
}  // namespace

TEST_P(CacheTest, ReleaseAndErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
                           &CacheTest::Deleter, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_keys_.size());
  auto erased = cache->Release(handle, true);
  ASSERT_TRUE(erased);
  // This verifies that the deleter has been called.
  ASSERT_EQ(1U, deleted_keys_.size());
}

TEST_P(CacheTest, ReleaseWithoutErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
                           &CacheTest::Deleter, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_keys_.size());
  auto erased = cache->Release(handle);
  ASSERT_FALSE(erased);
  // This verifies that the deleter is not called: while the cache has free
  // capacity, it is not expected to erase released items immediately.
  ASSERT_EQ(0U, deleted_keys_.size());
}

TEST_P(CacheTest, SetCapacity) {
  // test1: increase capacity.
  // Create a cache with capacity 5, insert 5 entries, then increase the
  // capacity to 10; the returned capacity should be 10 and usage 5.
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  // Insert 5 entries, without releasing.
  for (size_t i = 0; i < 5; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());
  cache->SetCapacity(10);
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());

  // test2: decrease capacity.
  // Insert 5 more entries, release the first 5, then decrease the capacity
  // to 7; the final capacity should be 7 and usage 7.
  for (size_t i = 5; i < 10; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  for (size_t i = 0; i < 5; i++) {
    cache->Release(handles[i]);
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  cache->SetCapacity(7);
  ASSERT_EQ(7, cache->GetCapacity());
  ASSERT_EQ(7, cache->GetUsage());

  // Release the remaining 5 to keep valgrind happy.
  for (size_t i = 5; i < 10; i++) {
    cache->Release(handles[i]);
  }
}

TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
  // test1: set the flag to false. Insert more keys than capacity. See if they
  // all go through.
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  Status s;
  for (size_t i = 0; i < 10; i++) {
    std::string key = ToString(i + 1);
    s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  ASSERT_EQ(10, cache->GetUsage());

  // test2: set the flag to true. Insert and check if it fails.
  std::string extra_key = "extra";
  Value* extra_value = new Value(0);
  cache->SetStrictCapacityLimit(true);
  Cache::Handle* handle;
  s = cache->Insert(extra_key, extra_value, 1, &deleter, &handle);
  ASSERT_TRUE(s.IsIncomplete());
  ASSERT_EQ(nullptr, handle);
  ASSERT_EQ(10, cache->GetUsage());

  for (size_t i = 0; i < 10; i++) {
    cache->Release(handles[i]);
  }

  // test3: initialize with the flag set to true.
  std::shared_ptr<Cache> cache2 = NewCache(5, 0, true);
  for (size_t i = 0; i < 5; i++) {
    std::string key = ToString(i + 1);
    s = cache2->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  s = cache2->Insert(extra_key, extra_value, 1, &deleter, &handle);
  ASSERT_TRUE(s.IsIncomplete());
  ASSERT_EQ(nullptr, handle);
  // Test insert without a handle.
  s = cache2->Insert(extra_key, extra_value, 1, &deleter);
  // It is as if the key had been inserted into the cache and then immediately
  // evicted.
  ASSERT_OK(s);
  ASSERT_EQ(5, cache2->GetUsage());
  ASSERT_EQ(nullptr, cache2->Lookup(extra_key));

  for (size_t i = 0; i < 5; i++) {
    cache2->Release(handles[i]);
  }
}

TEST_P(CacheTest, OverCapacity) {
  size_t n = 10;

  // An LRUCache with n entries and one shard only.
  std::shared_ptr<Cache> cache = NewCache(n, 0, false);

  std::vector<Cache::Handle*> handles(n + 1);

  // Insert n+1 entries, without releasing.
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }

  // Guess what's in the cache now?
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    auto h = cache->Lookup(key);
    ASSERT_TRUE(h != nullptr);
    if (h) cache->Release(h);
  }

  // The cache is over capacity since nothing could be evicted.
  ASSERT_EQ(n + 1U, cache->GetUsage());
  for (size_t i = 0; i < n + 1; i++) {
    cache->Release(handles[i]);
  }
  // Make sure eviction is triggered.
  cache->SetCapacity(n);

  // The cache is under capacity now since elements were released.
  ASSERT_EQ(n, cache->GetUsage());

  // Element 0 is evicted and the rest are still there. This is consistent
  // with the LRU policy, since element 0 was released first.
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    auto h = cache->Lookup(key);
    if (h) {
      ASSERT_NE(i, 0U);
      cache->Release(h);
    } else {
      ASSERT_EQ(i, 0U);
    }
  }
}

namespace {
std::vector<std::pair<int, int>> callback_state;
void callback(void* entry, size_t charge) {
  callback_state.push_back({DecodeValue(entry), static_cast<int>(charge)});
}
}  // namespace

TEST_P(CacheTest, ApplyToAllCacheEntriesTest) {
  std::vector<std::pair<int, int>> inserted;
  callback_state.clear();

  for (int i = 0; i < 10; ++i) {
    Insert(i, i * 2, i + 1);
    inserted.push_back({i * 2, i + 1});
  }
  cache_->ApplyToAllCacheEntries(callback, true);

  std::sort(inserted.begin(), inserted.end());
  std::sort(callback_state.begin(), callback_state.end());
  ASSERT_TRUE(inserted == callback_state);
}

TEST_P(CacheTest, DefaultShardBits) {
  // Check that the number of shard bits is derived from the capacity when
  // num_shard_bits is not given: judging by the expected values below, the
  // default aims for roughly one shard per 512KB of capacity, capped at
  // 6 bits (64 shards).
  std::shared_ptr<Cache> cache = NewCache(16 * 1024L * 1024L);
  ShardedCache* sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(5, sc->GetNumShardBits());

  cache = NewLRUCache(511 * 1024L, -1, true);
  sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(0, sc->GetNumShardBits());

  cache = NewLRUCache(1024L * 1024L * 1024L, -1, true);
  sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(6, sc->GetNumShardBits());
}

TEST_P(CacheTest, GetCharge) {
  Insert(1, 2);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(1));
  ASSERT_EQ(2, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1, cache_->GetCharge(h1));
  cache_->Release(h1);
}
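
// When the clock cache is supported on this platform, run every CacheTest
// with both implementations; otherwise only the LRU cache is exercised.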
#ifdef SUPPORT_CLOCK_CACHE
std::shared_ptr<Cache> (*new_clock_cache_func)(
    size_t, int, bool, CacheMetadataChargePolicy) = NewClockCache;
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
                        testing::Values(kLRU, kClock));
#else
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest, testing::Values(kLRU));
#endif  // SUPPORT_CLOCK_CACHE
INSTANTIATE_TEST_CASE_P(CacheTestInstance, LRUCacheTest, testing::Values(kLRU));

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}