// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "rocksdb/cache.h"

#include <algorithm>
#include <forward_list>
#include <functional>
#include <iostream>
#include <string>
#include <thread>
#include <vector>

#include "cache/lru_cache.h"
#include "cache/typed_cache.h"
#include "port/stack_trace.h"
#include "table/block_based/block_cache.h"
#include "test_util/secondary_cache_test_util.h"
#include "test_util/testharness.h"
#include "util/coding.h"
#include "util/hash_containers.h"
#include "util/string_util.h"

// HyperClockCache only supports 16-byte keys, so some of the tests
// originally written for LRUCache do not work on the other caches.
// Those tests were adapted to use 16-byte keys. We kept the original ones.
// TODO: Remove the original tests if they ever become unused.

namespace ROCKSDB_NAMESPACE {

namespace {

// Conversions between numeric keys/values and the types expected by Cache.
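// For example, EncodeKey16Bytes(7) produces the 4-byte little-endian encoding
// of 7 (via PutFixed32) followed by twelve 'a' padding bytes, and
// DecodeKey16Bytes() recovers 7 from the first four bytes.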
std::string EncodeKey16Bytes(int k) {
  std::string result;
  PutFixed32(&result, k);
  result.append(std::string(12, 'a'));  // Pad with 12 bytes to produce the
                                        // required 16-byte key.
  return result;
}

int DecodeKey16Bytes(const Slice& k) {
  assert(k.size() == 16);
  return DecodeFixed32(k.data());  // Decodes only the first 4 bytes of k.
}

std::string EncodeKey32Bits(int k) {
  std::string result;
  PutFixed32(&result, k);
  return result;
}

int DecodeKey32Bits(const Slice& k) {
  assert(k.size() == 4);
  return DecodeFixed32(k.data());
}

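// Test values are plain integers packed directly into the Cache::ObjectPtr
// (no heap allocation), so the deleters used below have nothing to free.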
Cache::ObjectPtr EncodeValue(uintptr_t v) {
  return reinterpret_cast<Cache::ObjectPtr>(v);
}

int DecodeValue(void* v) {
  return static_cast<int>(reinterpret_cast<uintptr_t>(v));
}

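// Helper with a no-op deleter, for tests that don't need to observe deletions.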
const Cache::CacheItemHelper kDumbHelper{
    CacheEntryRole::kMisc,
    [](Cache::ObjectPtr /*value*/, MemoryAllocator* /*alloc*/) {}};

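// Helper whose deleter invokes the std::function<void()> stored as the value,
// letting a test run arbitrary code (e.g. a re-entrant Erase) when the entry
// is destroyed.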
const Cache::CacheItemHelper kInvokeOnDeleteHelper{
    CacheEntryRole::kMisc,
    [](Cache::ObjectPtr value, MemoryAllocator* /*alloc*/) {
      auto& fn = *static_cast<std::function<void()>*>(value);
      fn();
    }};
}  // anonymous namespace

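// Parameterized fixture: each test runs against every cache type supplied by
// secondary_cache_test_util::GetTestingCacheTypes() (LRUCache and the
// HyperClockCache variants referenced below). Two caches of different sizes
// and shard counts are constructed for every test.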
class CacheTest : public testing::Test,
                  public secondary_cache_test_util::WithCacheTypeParam {
 public:
  static CacheTest* current_;
  static std::string type_;

  static void Deleter(Cache::ObjectPtr v, MemoryAllocator*) {
    current_->deleted_values_.push_back(DecodeValue(v));
  }

  static const Cache::CacheItemHelper kHelper;

  static const int kCacheSize = 1000;
  static const int kNumShardBits = 4;

  static const int kCacheSize2 = 100;
  static const int kNumShardBits2 = 2;

  std::vector<int> deleted_values_;
  std::shared_ptr<Cache> cache_;
  std::shared_ptr<Cache> cache2_;

  CacheTest()
      : cache_(NewCache(kCacheSize, kNumShardBits, false)),
        cache2_(NewCache(kCacheSize2, kNumShardBits2, false)) {
    current_ = this;
    type_ = GetParam();
  }

  ~CacheTest() override = default;

  // These functions encode/decode keys in test cases that use
  // int keys.
  // Currently, HyperClockCache requires keys to be 16B long, whereas
  // LRUCache doesn't, so the encoding depends on the cache type.
  std::string EncodeKey(int k) {
    if (IsHyperClock()) {
      return EncodeKey16Bytes(k);
    } else {
      return EncodeKey32Bits(k);
    }
  }

  int DecodeKey(const Slice& k) {
    if (IsHyperClock()) {
      return DecodeKey16Bytes(k);
    } else {
      return DecodeKey32Bits(k);
    }
  }

  int Lookup(std::shared_ptr<Cache> cache, int key) {
    Cache::Handle* handle = cache->Lookup(EncodeKey(key));
    const int r = (handle == nullptr) ? -1 : DecodeValue(cache->Value(handle));
    if (handle != nullptr) {
      cache->Release(handle);
    }
    return r;
  }

  void Insert(std::shared_ptr<Cache> cache, int key, int value,
              int charge = 1) {
    EXPECT_OK(cache->Insert(EncodeKey(key), EncodeValue(value), &kHelper,
                            charge, /*handle*/ nullptr, Cache::Priority::HIGH));
  }

  void Erase(std::shared_ptr<Cache> cache, int key) {
    cache->Erase(EncodeKey(key));
  }

  int Lookup(int key) { return Lookup(cache_, key); }

  void Insert(int key, int value, int charge = 1) {
    Insert(cache_, key, value, charge);
  }

  void Erase(int key) { Erase(cache_, key); }

  int Lookup2(int key) { return Lookup(cache2_, key); }

  void Insert2(int key, int value, int charge = 1) {
    Insert(cache2_, key, value, charge);
  }

  void Erase2(int key) { Erase(cache2_, key); }
};

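// kHelper records each deleted value in deleted_values_ via Deleter, so tests
// can assert exactly which entries were evicted or erased.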
const Cache::CacheItemHelper CacheTest::kHelper{CacheEntryRole::kMisc,
                                                &CacheTest::Deleter};

CacheTest* CacheTest::current_;
std::string CacheTest::type_;

class LRUCacheTest : public CacheTest {};

TEST_P(CacheTest, UsageTest) {
  // cache is std::shared_ptr and will be automatically cleaned up.
  const size_t kCapacity = 100000;
  auto cache = NewCache(kCapacity, 6, false, kDontChargeCacheMetadata);
  auto precise_cache = NewCache(kCapacity, 0, false, kFullChargeCacheMetadata);
  ASSERT_EQ(0, cache->GetUsage());
  size_t baseline_meta_usage = precise_cache->GetUsage();
  if (!IsHyperClock()) {
    ASSERT_EQ(0, baseline_meta_usage);
  }

  size_t usage = 0;
  char value[10] = "abcdef";
  // make sure everything will be cached
  for (int i = 1; i < 100; ++i) {
    std::string key = EncodeKey(i);
    auto kv_size = key.size() + 5;
    ASSERT_OK(cache->Insert(key, value, &kDumbHelper, kv_size));
    ASSERT_OK(precise_cache->Insert(key, value, &kDumbHelper, kv_size));
    usage += kv_size;
    ASSERT_EQ(usage, cache->GetUsage());
    if (GetParam() == kFixedHyperClock) {
      ASSERT_EQ(baseline_meta_usage + usage, precise_cache->GetUsage());
    } else {
      // AutoHyperClockCache meta usage grows in proportion to lifetime
      // max number of entries. LRUCache in proportion to resident number of
      // entries, though there is an untracked component proportional to
      // lifetime max number of entries.
      ASSERT_LT(usage, precise_cache->GetUsage());
    }
  }

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(0, cache->GetUsage());
  if (GetParam() != kAutoHyperClock) {
    // NOTE: AutoHyperClockCache meta usage grows in proportion to lifetime
    // max number of entries.
    ASSERT_EQ(baseline_meta_usage, precise_cache->GetUsage());
  }

  // make sure the cache will be overloaded
  for (size_t i = 1; i < kCapacity; ++i) {
    std::string key = EncodeKey(static_cast<int>(1000 + i));
    ASSERT_OK(cache->Insert(key, value, &kDumbHelper, key.size() + 5));
    ASSERT_OK(precise_cache->Insert(key, value, &kDumbHelper, key.size() + 5));
  }

  // the usage should be close to the capacity
  ASSERT_GT(kCapacity, cache->GetUsage());
  ASSERT_GT(kCapacity, precise_cache->GetUsage());
  ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
  if (!IsHyperClock()) {
    ASSERT_LT(kCapacity * 0.95, precise_cache->GetUsage());
  } else {
    // estimated value size of 1 is weird for clock cache, because
    // almost all of the capacity will be used for metadata, and due to only
    // using power of 2 table sizes, we might hit strict occupancy limit
    // before hitting capacity limit.
    ASSERT_LT(kCapacity * 0.80, precise_cache->GetUsage());
  }
}

// TODO: This test takes longer than expected on FixedHyperClockCache.
// This is because the values size estimate at construction is too sloppy.
// Fix this.
// Why is it so slow? The cache is constructed with an estimate of 1, but
// then the charge is claimed to be 21. This will cause the hash table
// to be extremely sparse, which in turn means clock needs to scan too
// many slots to find victims.
TEST_P(CacheTest, PinnedUsageTest) {
  // cache is std::shared_ptr and will be automatically cleaned up.
  const size_t kCapacity = 200000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
  auto precise_cache = NewCache(kCapacity, 8, false, kFullChargeCacheMetadata);
  size_t baseline_meta_usage = precise_cache->GetUsage();
  if (!IsHyperClock()) {
    ASSERT_EQ(0, baseline_meta_usage);
  }

  size_t pinned_usage = 0;
  char value[10] = "abcdef";

  std::forward_list<Cache::Handle*> unreleased_handles;
  std::forward_list<Cache::Handle*> unreleased_handles_in_precise_cache;

  // Add entries. Unpin some of them after insertion. Then, pin some of them
  // again. Check GetPinnedUsage().
  for (int i = 1; i < 100; ++i) {
    std::string key = EncodeKey(i);
    auto kv_size = key.size() + 5;
    Cache::Handle* handle;
    Cache::Handle* handle_in_precise_cache;
    ASSERT_OK(cache->Insert(key, value, &kDumbHelper, kv_size, &handle));
    assert(handle);
    ASSERT_OK(precise_cache->Insert(key, value, &kDumbHelper, kv_size,
                                    &handle_in_precise_cache));
    assert(handle_in_precise_cache);
    pinned_usage += kv_size;
    ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
    ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    if (i % 2 == 0) {
      cache->Release(handle);
      precise_cache->Release(handle_in_precise_cache);
      pinned_usage -= kv_size;
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
      ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    } else {
      unreleased_handles.push_front(handle);
      unreleased_handles_in_precise_cache.push_front(handle_in_precise_cache);
    }
    if (i % 3 == 0) {
      unreleased_handles.push_front(cache->Lookup(key));
      auto x = precise_cache->Lookup(key);
      assert(x);
      unreleased_handles_in_precise_cache.push_front(x);
      // If i % 2 == 0, then the entry was unpinned before Lookup, so pinned
      // usage increased
      if (i % 2 == 0) {
        pinned_usage += kv_size;
      }
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
      ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    }
  }
  auto precise_cache_pinned_usage = precise_cache->GetPinnedUsage();
  ASSERT_LT(pinned_usage, precise_cache_pinned_usage);

  // check that overloading the cache does not change the pinned usage
  for (size_t i = 1; i < 2 * kCapacity; ++i) {
    std::string key = EncodeKey(static_cast<int>(1000 + i));
    ASSERT_OK(cache->Insert(key, value, &kDumbHelper, key.size() + 5));
    ASSERT_OK(precise_cache->Insert(key, value, &kDumbHelper, key.size() + 5));
  }
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
  ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
  ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());

  // release handles for pinned entries to prevent memory leaks
  for (auto handle : unreleased_handles) {
    cache->Release(handle);
  }
  for (auto handle : unreleased_handles_in_precise_cache) {
    precise_cache->Release(handle);
  }
  ASSERT_EQ(0, cache->GetPinnedUsage());
  ASSERT_EQ(0, precise_cache->GetPinnedUsage());
  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(0, cache->GetUsage());
  if (GetParam() != kAutoHyperClock) {
    // NOTE: AutoHyperClockCache meta usage grows in proportion to lifetime
    // max number of entries.
    ASSERT_EQ(baseline_meta_usage, precise_cache->GetUsage());
  }
}

TEST_P(CacheTest, HitAndMiss) {
  ASSERT_EQ(-1, Lookup(100));

  Insert(100, 101);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(200, 201);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(100, 102);
  if (IsHyperClock()) {
    // ClockCache usually doesn't overwrite on Insert
    ASSERT_EQ(101, Lookup(100));
  } else {
    ASSERT_EQ(102, Lookup(100));
  }
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  ASSERT_EQ(1U, deleted_values_.size());
  if (IsHyperClock()) {
    ASSERT_EQ(102, deleted_values_[0]);
  } else {
    ASSERT_EQ(101, deleted_values_[0]);
  }
}

TEST_P(CacheTest, InsertSameKey) {
  if (IsHyperClock()) {
    ROCKSDB_GTEST_BYPASS(
        "ClockCache doesn't guarantee Insert overwrite same key.");
    return;
  }
  Insert(1, 1);
  Insert(1, 2);
  ASSERT_EQ(2, Lookup(1));
}

TEST_P(CacheTest, Erase) {
  Erase(200);
  ASSERT_EQ(0U, deleted_values_.size());

  Insert(100, 101);
  Insert(200, 201);
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_values_.size());
  ASSERT_EQ(101, deleted_values_[0]);

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_values_.size());
}

TEST_P(CacheTest, EntriesArePinned) {
  if (IsHyperClock()) {
    ROCKSDB_GTEST_BYPASS(
        "ClockCache doesn't guarantee Insert overwrite same key.");
    return;
  }
  Insert(100, 101);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1U, cache_->GetUsage());

  Insert(100, 102);
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
  ASSERT_EQ(0U, deleted_values_.size());
  ASSERT_EQ(2U, cache_->GetUsage());

  cache_->Release(h1);
  ASSERT_EQ(1U, deleted_values_.size());
  ASSERT_EQ(101, deleted_values_[0]);
  ASSERT_EQ(1U, cache_->GetUsage());

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(1U, deleted_values_.size());
  ASSERT_EQ(1U, cache_->GetUsage());

  cache_->Release(h2);
  ASSERT_EQ(2U, deleted_values_.size());
  ASSERT_EQ(102, deleted_values_[1]);
  ASSERT_EQ(0U, cache_->GetUsage());
}

TEST_P(CacheTest, EvictionPolicy) {
  Insert(100, 101);
  Insert(200, 201);
  // Frequently used entry must be kept around
  for (int i = 0; i < 2 * kCacheSize; i++) {
    Insert(1000 + i, 2000 + i);
    ASSERT_EQ(101, Lookup(100));
  }
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
}

TEST_P(CacheTest, ExternalRefPinsEntries) {
  Insert(100, 101);
  Cache::Handle* h = cache_->Lookup(EncodeKey(100));
  ASSERT_TRUE(cache_->Ref(h));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h)));
  ASSERT_EQ(1U, cache_->GetUsage());

  for (int i = 0; i < 3; ++i) {
    if (i > 0) {
      // First release (i == 1) corresponds to Ref(), second release (i == 2)
      // corresponds to Lookup(). Then, since all external refs are released,
      // the below insertions should push out the cache entry.
      cache_->Release(h);
    }
    // double cache size because the usage bit in block cache prevents 100 from
    // being evicted in the first kCacheSize iterations
    for (int j = 0; j < 2 * kCacheSize + 100; j++) {
      Insert(1000 + j, 2000 + j);
    }
    // Clock cache is even more stateful and needs more churn to evict
    if (IsHyperClock()) {
      for (int j = 0; j < kCacheSize; j++) {
        Insert(11000 + j, 11000 + j);
      }
    }
    if (i < 2) {
      ASSERT_EQ(101, Lookup(100));
    }
  }
  ASSERT_EQ(-1, Lookup(100));
}

TEST_P(CacheTest, EvictionPolicyRef) {
  Insert(100, 101);
  Insert(101, 102);
  Insert(102, 103);
  Insert(103, 104);
  Insert(200, 101);
  Insert(201, 102);
  Insert(202, 103);
  Insert(203, 104);
  Cache::Handle* h201 = cache_->Lookup(EncodeKey(200));
  Cache::Handle* h202 = cache_->Lookup(EncodeKey(201));
  Cache::Handle* h203 = cache_->Lookup(EncodeKey(202));
  Cache::Handle* h204 = cache_->Lookup(EncodeKey(203));
  Insert(300, 101);
  Insert(301, 102);
  Insert(302, 103);
  Insert(303, 104);

  // Insert entries much more than cache capacity.
  for (int i = 0; i < 100 * kCacheSize; i++) {
    Insert(1000 + i, 2000 + i);
  }

  // Check whether the entries inserted in the beginning
  // are evicted. Ones without extra ref are evicted and
  // those with are not.
  EXPECT_EQ(-1, Lookup(100));
  EXPECT_EQ(-1, Lookup(101));
  EXPECT_EQ(-1, Lookup(102));
  EXPECT_EQ(-1, Lookup(103));

  EXPECT_EQ(-1, Lookup(300));
  EXPECT_EQ(-1, Lookup(301));
  EXPECT_EQ(-1, Lookup(302));
  EXPECT_EQ(-1, Lookup(303));

  EXPECT_EQ(101, Lookup(200));
  EXPECT_EQ(102, Lookup(201));
  EXPECT_EQ(103, Lookup(202));
  EXPECT_EQ(104, Lookup(203));

  // Cleaning up all the handles
  cache_->Release(h201);
  cache_->Release(h202);
  cache_->Release(h203);
  cache_->Release(h204);
}

TEST_P(CacheTest, EvictEmptyCache) {
  // Insert an item larger than the capacity to trigger eviction on an empty
  // cache.
  auto cache = NewCache(1, 0, false);
  ASSERT_OK(cache->Insert(EncodeKey(1000), nullptr, &kDumbHelper, 10));
}

TEST_P(CacheTest, EraseFromDeleter) {
  // Use a deleter that erases another item from the cache, which re-enters
  // the cache at that point.
  std::shared_ptr<Cache> cache = NewCache(10, 0, false);
  std::string foo = EncodeKey(1234);
  std::string bar = EncodeKey(5678);

  std::function<void()> erase_fn = [&]() { cache->Erase(foo); };

  ASSERT_OK(cache->Insert(foo, nullptr, &kDumbHelper, 1));
  ASSERT_OK(cache->Insert(bar, &erase_fn, &kInvokeOnDeleteHelper, 1));

  cache->Erase(bar);
  ASSERT_EQ(nullptr, cache->Lookup(foo));
  ASSERT_EQ(nullptr, cache->Lookup(bar));
}

TEST_P(CacheTest, ErasedHandleState) {
  // insert a key and get two handles
  Insert(100, 1000);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(h1, h2);
  ASSERT_EQ(DecodeValue(cache_->Value(h1)), 1000);
  ASSERT_EQ(DecodeValue(cache_->Value(h2)), 1000);

  // delete the key from the cache
  Erase(100);
  // can no longer find in the cache
  ASSERT_EQ(-1, Lookup(100));

  // release one handle
  cache_->Release(h1);
  // still can't find in cache
  ASSERT_EQ(-1, Lookup(100));

  cache_->Release(h2);
}

TEST_P(CacheTest, HeavyEntries) {
  // Add a bunch of light and heavy entries and then count the combined
  // size of items still in the cache, which must be approximately the
  // same as the total capacity.
  const int kLight = 1;
  const int kHeavy = 10;
  int added = 0;
  int index = 0;
  while (added < 2 * kCacheSize) {
    const int weight = (index & 1) ? kLight : kHeavy;
    Insert(index, 1000 + index, weight);
    added += weight;
    index++;
  }

  int cached_weight = 0;
  for (int i = 0; i < index; i++) {
    const int weight = (i & 1 ? kLight : kHeavy);
    int r = Lookup(i);
    if (r >= 0) {
      cached_weight += weight;
      ASSERT_EQ(1000 + i, r);
    }
  }
  ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
}

TEST_P(CacheTest, NewId) {
  uint64_t a = cache_->NewId();
  uint64_t b = cache_->NewId();
  ASSERT_NE(a, b);
}

TEST_P(CacheTest, ReleaseAndErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s =
      cache->Insert(EncodeKey(100), EncodeValue(100), &kHelper, 1, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_values_.size());
  auto erased = cache->Release(handle, true);
  ASSERT_TRUE(erased);
  // This tests that deleter has been called
  ASSERT_EQ(1U, deleted_values_.size());
}

TEST_P(CacheTest, ReleaseWithoutErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s =
      cache->Insert(EncodeKey(100), EncodeValue(100), &kHelper, 1, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_values_.size());
  auto erased = cache->Release(handle);
  ASSERT_FALSE(erased);
  // This tests that deleter is not called. When cache has free capacity it is
  // not expected to immediately erase the released items.
  ASSERT_EQ(0U, deleted_values_.size());
}

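// Unlike the integer-encoded values used above, the tests below store
// heap-allocated Value objects through the typed cache interface, which
// deletes them when entries are evicted or erased.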
namespace {
class Value {
 public:
  explicit Value(int v) : v_(v) {}

  int v_;

  static constexpr auto kCacheEntryRole = CacheEntryRole::kMisc;
};

using SharedCache = BasicTypedSharedCacheInterface<Value>;
using TypedHandle = SharedCache::TypedHandle;
}  // namespace

TEST_P(CacheTest, SetCapacity) {
  if (IsHyperClock()) {
    // TODO: update test & code for limited support
    ROCKSDB_GTEST_BYPASS(
        "HyperClockCache doesn't support arbitrary capacity "
        "adjustments.");
    return;
  }
  // test1: increase capacity
  // let's create a cache with capacity 5,
  // then, insert 5 elements, then increase capacity
  // to 10, returned capacity should be 10, usage=5
  SharedCache cache{NewCache(5, 0, false)};
  std::vector<TypedHandle*> handles(10);
  // Insert 5 entries, but not releasing.
  for (int i = 0; i < 5; i++) {
    std::string key = EncodeKey(i + 1);
    Status s = cache.Insert(key, new Value(i + 1), 1, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(5U, cache.get()->GetCapacity());
  ASSERT_EQ(5U, cache.get()->GetUsage());
  cache.get()->SetCapacity(10);
  ASSERT_EQ(10U, cache.get()->GetCapacity());
  ASSERT_EQ(5U, cache.get()->GetUsage());

  // test2: decrease capacity
  // insert 5 more elements to cache, then release 5,
  // then decrease capacity to 7, final capacity should be 7
  // and usage should be 7
  for (int i = 5; i < 10; i++) {
    std::string key = EncodeKey(i + 1);
    Status s = cache.Insert(key, new Value(i + 1), 1, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(10U, cache.get()->GetCapacity());
  ASSERT_EQ(10U, cache.get()->GetUsage());
  for (int i = 0; i < 5; i++) {
    cache.Release(handles[i]);
  }
  ASSERT_EQ(10U, cache.get()->GetCapacity());
  ASSERT_EQ(10U, cache.get()->GetUsage());
  cache.get()->SetCapacity(7);
  ASSERT_EQ(7, cache.get()->GetCapacity());
  ASSERT_EQ(7, cache.get()->GetUsage());

  // release remaining 5 to keep valgrind happy
  for (int i = 5; i < 10; i++) {
    cache.Release(handles[i]);
  }

  // Make sure this doesn't crash or upset ASAN/valgrind
  cache.get()->DisownData();
}

TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
  // test1: set the flag to false. Insert more keys than capacity. See if they
  // all go through.
  SharedCache cache{NewCache(5, 0, false)};
  std::vector<TypedHandle*> handles(10);
  Status s;
  for (int i = 0; i < 10; i++) {
    std::string key = EncodeKey(i + 1);
    s = cache.Insert(key, new Value(i + 1), 1, &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  ASSERT_EQ(10, cache.get()->GetUsage());

  // test2: set the flag to true. Insert and check if it fails.
  std::string extra_key = EncodeKey(100);
  Value* extra_value = new Value(0);
  cache.get()->SetStrictCapacityLimit(true);
  TypedHandle* handle;
  s = cache.Insert(extra_key, extra_value, 1, &handle);
  ASSERT_TRUE(s.IsMemoryLimit());
  ASSERT_EQ(nullptr, handle);
  ASSERT_EQ(10, cache.get()->GetUsage());

  for (int i = 0; i < 10; i++) {
    cache.Release(handles[i]);
  }

  // test3: init with flag being true.
  SharedCache cache2{NewCache(5, 0, true)};
  for (int i = 0; i < 5; i++) {
    std::string key = EncodeKey(i + 1);
    s = cache2.Insert(key, new Value(i + 1), 1, &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  s = cache2.Insert(extra_key, extra_value, 1, &handle);
  ASSERT_TRUE(s.IsMemoryLimit());
  ASSERT_EQ(nullptr, handle);
  // test insert without handle
  s = cache2.Insert(extra_key, extra_value, 1);
  // As if the key had been inserted into the cache but was evicted
  // immediately.
  ASSERT_OK(s);
  ASSERT_EQ(5, cache2.get()->GetUsage());
  ASSERT_EQ(nullptr, cache2.Lookup(extra_key));

  for (int i = 0; i < 5; i++) {
    cache2.Release(handles[i]);
  }
}

TEST_P(CacheTest, OverCapacity) {
  size_t n = 10;

  // a LRUCache with n entries and one shard only
  SharedCache cache{NewCache(n, 0, false)};
  std::vector<TypedHandle*> handles(n + 1);

  // Insert n+1 entries, but not releasing.
  for (int i = 0; i < static_cast<int>(n + 1); i++) {
    std::string key = EncodeKey(i + 1);
    Status s = cache.Insert(key, new Value(i + 1), 1, &handles[i]);
    ASSERT_TRUE(s.ok());
  }

  // Guess what's in the cache now?
  for (int i = 0; i < static_cast<int>(n + 1); i++) {
    std::string key = EncodeKey(i + 1);
    auto h = cache.Lookup(key);
    ASSERT_TRUE(h != nullptr);
    if (h) {
      cache.Release(h);
    }
  }

  // the cache is over capacity since nothing could be evicted
  ASSERT_EQ(n + 1U, cache.get()->GetUsage());
  for (int i = 0; i < static_cast<int>(n + 1); i++) {
    cache.Release(handles[i]);
  }

  if (IsHyperClock()) {
    // Make sure eviction is triggered.
    ASSERT_OK(cache.Insert(EncodeKey(-1), nullptr, 1, handles.data()));

    // cache is under capacity now since elements were released
    ASSERT_GE(n, cache.get()->GetUsage());

    // clean up
    cache.Release(handles[0]);
  } else {
    // LRUCache checks for over-capacity in Release.

    // cache is exactly at capacity now with minimal eviction
    ASSERT_EQ(n, cache.get()->GetUsage());

    // element 0 is evicted and the rest is there
    // This is consistent with the LRU policy since the element 0
    // was released first
    for (int i = 0; i < static_cast<int>(n + 1); i++) {
      std::string key = EncodeKey(i + 1);
      auto h = cache.Lookup(key);
      if (h) {
        ASSERT_NE(static_cast<size_t>(i), 0U);
        cache.Release(h);
      } else {
        ASSERT_EQ(static_cast<size_t>(i), 0U);
      }
    }
  }
}

TEST_P(CacheTest, ApplyToAllEntriesTest) {
  std::vector<std::string> callback_state;
  const auto callback = [&](const Slice& key, Cache::ObjectPtr value,
                            size_t charge,
                            const Cache::CacheItemHelper* helper) {
    callback_state.push_back(std::to_string(DecodeKey(key)) + "," +
                             std::to_string(DecodeValue(value)) + "," +
                             std::to_string(charge));
    assert(helper == &CacheTest::kHelper);
  };

  std::vector<std::string> inserted;
  callback_state.clear();

  for (int i = 0; i < 10; ++i) {
    Insert(i, i * 2, i + 1);
    inserted.push_back(std::to_string(i) + "," + std::to_string(i * 2) + "," +
                       std::to_string(i + 1));
  }
  cache_->ApplyToAllEntries(callback, /*opts*/ {});

  std::sort(inserted.begin(), inserted.end());
  std::sort(callback_state.begin(), callback_state.end());
  ASSERT_EQ(inserted.size(), callback_state.size());
  for (int i = 0; i < static_cast<int>(inserted.size()); ++i) {
    EXPECT_EQ(inserted[i], callback_state[i]);
  }
}

TEST_P(CacheTest, ApplyToAllEntriesDuringResize) {
  // This is a mini-stress test of ApplyToAllEntries, to ensure
  // items in the cache that are neither added nor removed
  // during ApplyToAllEntries are counted exactly once.

  // Insert some entries that we expect to be seen exactly once
  // during iteration.
  constexpr int kSpecialCharge = 2;
  constexpr int kNotSpecialCharge = 1;
  constexpr int kSpecialCount = 100;
  size_t expected_usage = 0;
  for (int i = 0; i < kSpecialCount; ++i) {
    Insert(i, i * 2, kSpecialCharge);
    expected_usage += kSpecialCharge;
  }

  // For callback
  int special_count = 0;
  const auto callback = [&](const Slice&, Cache::ObjectPtr, size_t charge,
                            const Cache::CacheItemHelper*) {
    if (charge == static_cast<size_t>(kSpecialCharge)) {
      ++special_count;
    }
  };

  // Start counting
  std::thread apply_thread([&]() {
    // Use small average_entries_per_lock to make the problem difficult
    Cache::ApplyToAllEntriesOptions opts;
    opts.average_entries_per_lock = 2;
    cache_->ApplyToAllEntries(callback, opts);
  });

  // In parallel, add more entries, enough to cause resize but not enough
  // to cause ejections. (Note: if any cache shard is over capacity, there
  // will be ejections)
  for (int i = kSpecialCount * 1; i < kSpecialCount * 5; ++i) {
    Insert(i, i * 2, kNotSpecialCharge);
    expected_usage += kNotSpecialCharge;
  }

  apply_thread.join();
  // verify no evictions
  ASSERT_EQ(cache_->GetUsage(), expected_usage);
  // verify everything seen in ApplyToAllEntries
  ASSERT_EQ(special_count, kSpecialCount);
}

TEST_P(CacheTest, ApplyToHandleTest) {
  std::string callback_state;
  const auto callback = [&](const Slice& key, Cache::ObjectPtr value,
                            size_t charge,
                            const Cache::CacheItemHelper* helper) {
    callback_state = std::to_string(DecodeKey(key)) + "," +
                     std::to_string(DecodeValue(value)) + "," +
                     std::to_string(charge);
    assert(helper == &CacheTest::kHelper);
  };

  std::vector<std::string> inserted;

  for (int i = 0; i < 10; ++i) {
    Insert(i, i * 2, i + 1);
    inserted.push_back(std::to_string(i) + "," + std::to_string(i * 2) + "," +
                       std::to_string(i + 1));
  }

  for (int i = 0; i < 10; ++i) {
    Cache::Handle* handle = cache_->Lookup(EncodeKey(i));
    cache_->ApplyToHandle(cache_.get(), handle, callback);
    EXPECT_EQ(inserted[i], callback_state);
    cache_->Release(handle);
  }
}

TEST_P(CacheTest, DefaultShardBits) {
  // Prevent excessive allocation (to save time & space)
  estimated_value_size_ = 100000;
  // Implementations use different minimum shard sizes
  size_t min_shard_size = (IsHyperClock() ? 32U * 1024U : 512U) * 1024U;

  std::shared_ptr<Cache> cache = NewCache(32U * min_shard_size);
  ShardedCacheBase* sc = dynamic_cast<ShardedCacheBase*>(cache.get());
  ASSERT_EQ(5, sc->GetNumShardBits());

  cache = NewCache(min_shard_size / 1000U * 999U);
  sc = dynamic_cast<ShardedCacheBase*>(cache.get());
  ASSERT_EQ(0, sc->GetNumShardBits());

  cache = NewCache(3U * 1024U * 1024U * 1024U);
  sc = dynamic_cast<ShardedCacheBase*>(cache.get());
  // current maximum of 6
  ASSERT_EQ(6, sc->GetNumShardBits());

  if constexpr (sizeof(size_t) > 4) {
    cache = NewCache(128U * min_shard_size);
    sc = dynamic_cast<ShardedCacheBase*>(cache.get());
    // current maximum of 6
    ASSERT_EQ(6, sc->GetNumShardBits());
  }
}

TEST_P(CacheTest, GetChargeAndDeleter) {
  Insert(1, 2);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(1));
  ASSERT_EQ(2, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1, cache_->GetCharge(h1));
  ASSERT_EQ(&CacheTest::kHelper, cache_->GetCacheItemHelper(h1));
  cache_->Release(h1);
}

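// Returns whether ApplyToAllEntries visits the two cached keys in ascending
// key order. Iteration order depends on the hash seed, so the seed tests
// below use this to detect whether different seeds produce different
// orderings.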
namespace {
bool AreTwoCacheKeysOrdered(Cache* cache) {
  std::vector<std::string> keys;
  const auto callback = [&](const Slice& key, Cache::ObjectPtr /*value*/,
                            size_t /*charge*/,
                            const Cache::CacheItemHelper* /*helper*/) {
    keys.push_back(key.ToString());
  };
  cache->ApplyToAllEntries(callback, /*opts*/ {});
  EXPECT_EQ(keys.size(), 2U);
  EXPECT_NE(keys[0], keys[1]);
  return keys[0] < keys[1];
}
}  // namespace

TEST_P(CacheTest, CacheUniqueSeeds) {
  // kQuasiRandomHashSeed should generate unique seeds (up to 2 billion before
  // repeating)
  UnorderedSet<uint32_t> seeds_seen;
  // Roughly sqrt(number of possible values) for a decent chance at detecting
  // a random collision if it's possible (shouldn't be)
  uint16_t kSamples = 20000;
  seeds_seen.reserve(kSamples);

  // Hash seed should affect ordering of entries in the table, so we should
  // have extremely high chance of seeing two entries ordered both ways.
  bool seen_forward_order = false;
  bool seen_reverse_order = false;

  for (int i = 0; i < kSamples; ++i) {
    auto cache = NewCache(2, [=](ShardedCacheOptions& opts) {
      opts.hash_seed = LRUCacheOptions::kQuasiRandomHashSeed;
      opts.num_shard_bits = 0;
      opts.metadata_charge_policy = kDontChargeCacheMetadata;
    });
    auto val = cache->GetHashSeed();
    ASSERT_TRUE(seeds_seen.insert(val).second);

    ASSERT_OK(cache->Insert(EncodeKey(1), nullptr, &kHelper, /*charge*/ 1));
    ASSERT_OK(cache->Insert(EncodeKey(2), nullptr, &kHelper, /*charge*/ 1));

    if (AreTwoCacheKeysOrdered(cache.get())) {
      seen_forward_order = true;
    } else {
      seen_reverse_order = true;
    }
  }

  ASSERT_TRUE(seen_forward_order);
  ASSERT_TRUE(seen_reverse_order);
}

TEST_P(CacheTest, CacheHostSeed) {
  // kHostHashSeed should generate a consistent seed within this process
  // (and other processes on the same host, but not unit testing that).
  // And we should be able to use that chosen seed as an explicit option
  // (for debugging).
  // And we should verify consistent ordering of entries.
  uint32_t expected_seed = 0;
  bool expected_order = false;
  // 10 iterations -> chance of a random seed falsely appearing consistent
  // should be low, just 1 in 2^9.
  for (int i = 0; i < 10; ++i) {
    auto cache = NewCache(2, [=](ShardedCacheOptions& opts) {
      if (i != 5) {
        opts.hash_seed = LRUCacheOptions::kHostHashSeed;
      } else {
        // Can be used as explicit seed
        opts.hash_seed = static_cast<int32_t>(expected_seed);
        ASSERT_GE(opts.hash_seed, 0);
      }
      opts.num_shard_bits = 0;
      opts.metadata_charge_policy = kDontChargeCacheMetadata;
    });
    ASSERT_OK(cache->Insert(EncodeKey(1), nullptr, &kHelper, /*charge*/ 1));
    ASSERT_OK(cache->Insert(EncodeKey(2), nullptr, &kHelper, /*charge*/ 1));
    uint32_t val = cache->GetHashSeed();
    bool order = AreTwoCacheKeysOrdered(cache.get());
    if (i != 0) {
      ASSERT_EQ(val, expected_seed);
      ASSERT_EQ(order, expected_order);
    } else {
      expected_seed = val;
      expected_order = order;
    }
  }
  // Printed for reference in case it's needed to reproduce other unit test
  // failures on another host
  fprintf(stderr, "kHostHashSeed -> %u\n", (unsigned)expected_seed);
}

INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
                        secondary_cache_test_util::GetTestingCacheTypes());
INSTANTIATE_TEST_CASE_P(CacheTestInstance, LRUCacheTest,
                        testing::Values(secondary_cache_test_util::kLRU));

TEST(MiscBlockCacheTest, UncacheAggressivenessAdvisor) {
  // Maps an aggressiveness setting to a sequence of Report() calls (as a
  // string of 0s and 1s) consumed exactly until the first
  // ShouldContinue() == false.
  const std::vector<std::pair<uint32_t, Slice>> expectedTraces{
      // Aggressiveness 1 aborts on first unsuccessful erasure.
      {1, "0"},
      {1, "11111111111111111111110"},
      // For sufficient evidence, aggressiveness 2 requires a minimum of two
      // unsuccessful erasures.
      {2, "00"},
      {2, "0110"},
      {2, "1100"},
      {2, "011111111111111111111111111111111111111111111111111111111111111100"},
      {2, "0111111111111111111111111111111111110"},
      // For sufficient evidence, aggressiveness 3 and higher require a minimum
      // of three unsuccessful erasures.
      {3, "000"},
      {3, "01010"},
      {3, "111000"},
      {3, "00111111111111111111111111111111111100"},
      {3, "00111111111111111111110"},
      {4, "000"},
      {4, "01010"},
      {4, "111000"},
      {4, "001111111111111111111100"},
      {4, "0011111111111110"},
      {6, "000"},
      {6, "01010"},
      {6, "111000"},
      {6, "00111111111111100"},
      {6, "0011111110"},
      // 69 -> 50% threshold, now up to minimum of 4
      {69, "0000"},
      {69, "010000"},
      {69, "01010000"},
      {69, "101010100010101000"},
      // 230 -> 10% threshold, appropriately higher minimum
      {230, "000000000000"},
      {230, "0000000000010000000000"},
      {230, "00000000000100000000010000000000"}};
  for (const auto& [aggressiveness, t] : expectedTraces) {
    SCOPED_TRACE("aggressiveness=" + std::to_string(aggressiveness) + " with " +
                 t.ToString());
    UncacheAggressivenessAdvisor uaa(aggressiveness);
    for (size_t i = 0; i < t.size(); ++i) {
      SCOPED_TRACE("i=" + std::to_string(i));
      ASSERT_TRUE(uaa.ShouldContinue());
      uaa.Report(t[i] & 1);
    }
    ASSERT_FALSE(uaa.ShouldContinue());
  }
}

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}