// tiered_secondary_cache_test.cc
// Copyright (c) Meta Platforms, Inc. and affiliates.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
#include <memory>

#include "cache/compressed_secondary_cache.h"
#include "cache/secondary_cache_adapter.h"
#include "db/db_test_util.h"
#include "rocksdb/cache.h"
#include "rocksdb/secondary_cache.h"
#include "typed_cache.h"
#include "util/random.h"
  13. namespace ROCKSDB_NAMESPACE {
  14. class TestSecondaryCache : public SecondaryCache {
  15. public:
  16. explicit TestSecondaryCache(size_t capacity, bool ready_before_wait)
  17. : cache_(NewLRUCache(capacity, 0, false, 0.5 /* high_pri_pool_ratio */,
  18. nullptr, kDefaultToAdaptiveMutex,
  19. kDontChargeCacheMetadata)),
  20. ready_before_wait_(ready_before_wait),
  21. num_insert_saved_(0),
  22. num_hits_(0),
  23. num_misses_(0) {}
  24. const char* Name() const override { return "TestSecondaryCache"; }
  25. Status Insert(const Slice& /*key*/, Cache::ObjectPtr /*value*/,
  26. const Cache::CacheItemHelper* /*helper*/,
  27. bool /*force_insert*/) override {
  28. assert(false);
  29. return Status::NotSupported();
  30. }
  31. Status InsertSaved(const Slice& key, const Slice& saved,
  32. CompressionType type = kNoCompression,
  33. CacheTier source = CacheTier::kVolatileTier) override {
  34. CheckCacheKeyCommonPrefix(key);
  35. size_t size;
  36. char* buf;
  37. Status s;
  38. num_insert_saved_++;
  39. size = saved.size();
  40. buf = new char[size + sizeof(uint64_t) + 2 * sizeof(uint16_t)];
  41. EncodeFixed64(buf, size);
  42. buf += sizeof(uint64_t);
  43. EncodeFixed16(buf, type);
  44. buf += sizeof(uint16_t);
  45. EncodeFixed16(buf, (uint16_t)source);
  46. buf += sizeof(uint16_t);
  47. memcpy(buf, saved.data(), size);
  48. buf -= sizeof(uint64_t) + 2 * sizeof(uint16_t);
  49. if (!s.ok()) {
  50. delete[] buf;
  51. return s;
  52. }
  53. return cache_.Insert(key, buf, size);
  54. }
  55. std::unique_ptr<SecondaryCacheResultHandle> Lookup(
  56. const Slice& key, const Cache::CacheItemHelper* helper,
  57. Cache::CreateContext* create_context, bool wait, bool /*advise_erase*/,
  58. Statistics* /*stats*/, bool& kept_in_sec_cache) override {
  59. std::string key_str = key.ToString();
  60. TEST_SYNC_POINT_CALLBACK("TestSecondaryCache::Lookup", &key_str);
  61. std::unique_ptr<SecondaryCacheResultHandle> secondary_handle;
  62. kept_in_sec_cache = false;
  63. TypedHandle* handle = cache_.Lookup(key);
  64. if (handle) {
  65. num_hits_++;
  66. Cache::ObjectPtr value = nullptr;
  67. size_t charge = 0;
  68. Status s;
  69. char* ptr = cache_.Value(handle);
  70. CompressionType type;
  71. CacheTier source;
  72. size_t size = DecodeFixed64(ptr);
  73. ptr += sizeof(uint64_t);
  74. type = static_cast<CompressionType>(DecodeFixed16(ptr));
  75. ptr += sizeof(uint16_t);
  76. source = static_cast<CacheTier>(DecodeFixed16(ptr));
  77. assert(source == CacheTier::kVolatileTier);
  78. ptr += sizeof(uint16_t);
  79. s = helper->create_cb(Slice(ptr, size), type, source, create_context,
  80. /*alloc*/ nullptr, &value, &charge);
  81. if (s.ok()) {
  82. secondary_handle.reset(new TestSecondaryCacheResultHandle(
  83. cache_.get(), handle, value, charge,
  84. /*ready=*/wait || ready_before_wait_));
  85. kept_in_sec_cache = true;
  86. } else {
  87. cache_.Release(handle);
  88. }
  89. } else {
  90. num_misses_++;
  91. }
  92. return secondary_handle;
  93. }
  94. bool SupportForceErase() const override { return false; }
  95. void Erase(const Slice& /*key*/) override {}
  96. void WaitAll(std::vector<SecondaryCacheResultHandle*> handles) override {
  97. for (SecondaryCacheResultHandle* handle : handles) {
  98. TestSecondaryCacheResultHandle* sec_handle =
  99. static_cast<TestSecondaryCacheResultHandle*>(handle);
  100. EXPECT_FALSE(sec_handle->IsReady());
  101. sec_handle->SetReady();
  102. }
  103. }
  104. std::string GetPrintableOptions() const override { return ""; }
  105. uint32_t num_insert_saved() { return num_insert_saved_; }
  106. uint32_t num_hits() { return num_hits_; }
  107. uint32_t num_misses() { return num_misses_; }
  108. void CheckCacheKeyCommonPrefix(const Slice& key) {
  109. Slice current_prefix(key.data(), OffsetableCacheKey::kCommonPrefixSize);
  110. if (ckey_prefix_.empty()) {
  111. ckey_prefix_ = current_prefix.ToString();
  112. } else {
  113. EXPECT_EQ(ckey_prefix_, current_prefix.ToString());
  114. }
  115. }
  116. private:
  117. class TestSecondaryCacheResultHandle : public SecondaryCacheResultHandle {
  118. public:
  119. TestSecondaryCacheResultHandle(Cache* cache, Cache::Handle* handle,
  120. Cache::ObjectPtr value, size_t size,
  121. bool ready)
  122. : cache_(cache),
  123. handle_(handle),
  124. value_(value),
  125. size_(size),
  126. is_ready_(ready) {}
  127. ~TestSecondaryCacheResultHandle() override { cache_->Release(handle_); }
  128. bool IsReady() override { return is_ready_; }
  129. void Wait() override {}
  130. Cache::ObjectPtr Value() override {
  131. assert(is_ready_);
  132. return value_;
  133. }
  134. size_t Size() override { return Value() ? size_ : 0; }
  135. void SetReady() { is_ready_ = true; }
  136. private:
  137. Cache* cache_;
  138. Cache::Handle* handle_;
  139. Cache::ObjectPtr value_;
  140. size_t size_;
  141. bool is_ready_;
  142. };
  143. using SharedCache =
  144. BasicTypedSharedCacheInterface<char[], CacheEntryRole::kMisc>;
  145. using TypedHandle = SharedCache::TypedHandle;
  146. SharedCache cache_;
  147. bool ready_before_wait_;
  148. uint32_t num_insert_saved_;
  149. uint32_t num_hits_;
  150. uint32_t num_misses_;
  151. std::string ckey_prefix_;
  152. };
  153. class DBTieredSecondaryCacheTest : public DBTestBase {
  154. public:
  155. DBTieredSecondaryCacheTest()
  156. : DBTestBase("db_tiered_secondary_cache_test", /*env_do_fsync=*/true) {}
  157. std::shared_ptr<Cache> NewCache(
  158. size_t pri_capacity, size_t compressed_capacity, size_t nvm_capacity,
  159. TieredAdmissionPolicy adm_policy = TieredAdmissionPolicy::kAdmPolicyAuto,
  160. bool ready_before_wait = false) {
  161. LRUCacheOptions lru_opts;
  162. TieredCacheOptions opts;
  163. lru_opts.capacity = 0;
  164. lru_opts.num_shard_bits = 0;
  165. lru_opts.high_pri_pool_ratio = 0;
  166. opts.cache_opts = &lru_opts;
  167. opts.cache_type = PrimaryCacheType::kCacheTypeLRU;
  168. opts.comp_cache_opts.capacity = 0;
  169. opts.comp_cache_opts.num_shard_bits = 0;
  170. opts.total_capacity = pri_capacity + compressed_capacity;
  171. opts.compressed_secondary_ratio = compressed_secondary_ratio_ =
  172. (double)compressed_capacity / opts.total_capacity;
  173. if (nvm_capacity > 0) {
  174. nvm_sec_cache_.reset(
  175. new TestSecondaryCache(nvm_capacity, ready_before_wait));
  176. opts.nvm_sec_cache = nvm_sec_cache_;
  177. }
  178. opts.adm_policy = adm_policy;
  179. cache_ = NewTieredCache(opts);
  180. assert(cache_ != nullptr);
  181. return cache_;
  182. }
  183. void ClearPrimaryCache() {
  184. ASSERT_EQ(UpdateTieredCache(cache_, -1, 1.0), Status::OK());
  185. ASSERT_EQ(UpdateTieredCache(cache_, -1, compressed_secondary_ratio_),
  186. Status::OK());
  187. }
  188. TestSecondaryCache* nvm_sec_cache() { return nvm_sec_cache_.get(); }
  189. CompressedSecondaryCache* compressed_secondary_cache() {
  190. return static_cast<CompressedSecondaryCache*>(
  191. static_cast<CacheWithSecondaryAdapter*>(cache_.get())
  192. ->TEST_GetSecondaryCache());
  193. }
  194. private:
  195. std::shared_ptr<Cache> cache_;
  196. std::shared_ptr<TestSecondaryCache> nvm_sec_cache_;
  197. double compressed_secondary_ratio_;
  198. };
  199. // In this test, the block size is set to 4096. Each value is 1007 bytes, so
  200. // each data block contains exactly 4 KV pairs. Metadata blocks are not
  201. // cached, so we can accurately estimate the cache usage.
  202. TEST_F(DBTieredSecondaryCacheTest, BasicTest) {
  203. if (!LZ4_Supported()) {
  204. ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
  205. return;
  206. }
  207. BlockBasedTableOptions table_options;
  208. // We want a block cache of size 5KB, and a compressed secondary cache of
  209. // size 5KB. However, we specify a block cache size of 256KB here in order
  210. // to take into account the cache reservation in the block cache on
  211. // behalf of the compressed cache. The unit of cache reservation is 256KB.
  212. // The effective block cache capacity will be calculated as 256 + 5 = 261KB,
  213. // and 256KB will be reserved for the compressed cache, leaving 5KB for
  214. // the primary block cache. We only have to worry about this here because
  215. // the cache size is so small.
  216. table_options.block_cache = NewCache(256 * 1024, 5 * 1024, 256 * 1024);
  217. table_options.block_size = 4 * 1024;
  218. table_options.cache_index_and_filter_blocks = false;
  219. Options options = GetDefaultOptions();
  220. options.create_if_missing = true;
  221. options.compression = kLZ4Compression;
  222. options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  223. // Disable paranoid_file_checks so that flush will not read back the newly
  224. // written file
  225. options.paranoid_file_checks = false;
  226. DestroyAndReopen(options);
  227. Random rnd(301);
  228. const int N = 256;
  229. for (int i = 0; i < N; i++) {
  230. std::string p_v;
  231. test::CompressibleString(&rnd, 0.5, 1007, &p_v);
  232. ASSERT_OK(Put(Key(i), p_v));
  233. }
  234. ASSERT_OK(Flush());
  235. // The first 2 Gets, for keys 0 and 5, will load the corresponding data
  236. // blocks as they will be cache misses. The nvm secondary cache will be
  237. // warmed up with the compressed blocks
  238. std::string v = Get(Key(0));
  239. ASSERT_EQ(1007, v.size());
  240. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 1u);
  241. ASSERT_EQ(nvm_sec_cache()->num_misses(), 1u);
  242. v = Get(Key(5));
  243. ASSERT_EQ(1007, v.size());
  244. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 2u);
  245. ASSERT_EQ(nvm_sec_cache()->num_misses(), 2u);
  246. // At this point, the nvm cache is warmed up with the data blocks for 0
  247. // and 5. The next Get will lookup the block in nvm and will be a hit.
  248. // It will be created as a standalone entry in memory, and a placeholder
  249. // will be inserted in the primary and compressed caches.
  250. v = Get(Key(0));
  251. ASSERT_EQ(1007, v.size());
  252. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 2u);
  253. ASSERT_EQ(nvm_sec_cache()->num_misses(), 2u);
  254. ASSERT_EQ(nvm_sec_cache()->num_hits(), 1u);
  255. // For this Get, the primary and compressed only have placeholders for
  256. // the required data block. So we will lookup the nvm cache and find the
  257. // block there. This time, the block will be promoted to the primary
  258. // block cache. No promotion to the compressed secondary cache happens,
  259. // and it will retain the placeholder.
  260. v = Get(Key(0));
  261. ASSERT_EQ(1007, v.size());
  262. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 2u);
  263. ASSERT_EQ(nvm_sec_cache()->num_misses(), 2u);
  264. ASSERT_EQ(nvm_sec_cache()->num_hits(), 2u);
  265. // This Get will find the data block in the primary cache.
  266. v = Get(Key(0));
  267. ASSERT_EQ(1007, v.size());
  268. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 2u);
  269. ASSERT_EQ(nvm_sec_cache()->num_misses(), 2u);
  270. ASSERT_EQ(nvm_sec_cache()->num_hits(), 2u);
  271. // We repeat the sequence for key 5. This will end up evicting the block
  272. // for 0 from the in-memory cache.
  273. v = Get(Key(5));
  274. ASSERT_EQ(1007, v.size());
  275. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 2u);
  276. ASSERT_EQ(nvm_sec_cache()->num_misses(), 2u);
  277. ASSERT_EQ(nvm_sec_cache()->num_hits(), 3u);
  278. v = Get(Key(5));
  279. ASSERT_EQ(1007, v.size());
  280. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 2u);
  281. ASSERT_EQ(nvm_sec_cache()->num_misses(), 2u);
  282. ASSERT_EQ(nvm_sec_cache()->num_hits(), 4u);
  283. v = Get(Key(5));
  284. ASSERT_EQ(1007, v.size());
  285. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 2u);
  286. ASSERT_EQ(nvm_sec_cache()->num_misses(), 2u);
  287. ASSERT_EQ(nvm_sec_cache()->num_hits(), 4u);
  288. // This Get for key 0 will find the data block in nvm. Since the compressed
  289. // cache still has the placeholder, the block (compressed) will be
  290. // admitted. It is theh inserted into the primary as a standalone entry.
  291. v = Get(Key(0));
  292. ASSERT_EQ(1007, v.size());
  293. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 2u);
  294. ASSERT_EQ(nvm_sec_cache()->num_misses(), 2u);
  295. ASSERT_EQ(nvm_sec_cache()->num_hits(), 5u);
  296. // This Get for key 0 will find the data block in the compressed secondary
  297. // cache.
  298. v = Get(Key(0));
  299. ASSERT_EQ(1007, v.size());
  300. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 2u);
  301. ASSERT_EQ(nvm_sec_cache()->num_misses(), 2u);
  302. ASSERT_EQ(nvm_sec_cache()->num_hits(), 5u);
  303. Destroy(options);
  304. }
  305. // This test is very similar to BasicTest, except it calls MultiGet rather
  306. // than Get, in order to exercise the async lookup and WaitAll path.
  307. TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) {
  308. if (!LZ4_Supported()) {
  309. ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
  310. return;
  311. }
  312. BlockBasedTableOptions table_options;
  313. table_options.block_cache = NewCache(260 * 1024, 10 * 1024, 256 * 1024);
  314. table_options.block_size = 4 * 1024;
  315. table_options.cache_index_and_filter_blocks = false;
  316. Options options = GetDefaultOptions();
  317. options.create_if_missing = true;
  318. options.compression = kLZ4Compression;
  319. options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  320. options.paranoid_file_checks = false;
  321. DestroyAndReopen(options);
  322. Random rnd(301);
  323. const int N = 256;
  324. for (int i = 0; i < N; i++) {
  325. std::string p_v;
  326. test::CompressibleString(&rnd, 0.5, 1007, &p_v);
  327. ASSERT_OK(Put(Key(i), p_v));
  328. }
  329. ASSERT_OK(Flush());
  330. std::vector<std::string> keys;
  331. std::vector<std::string> values;
  332. keys.push_back(Key(0));
  333. keys.push_back(Key(4));
  334. keys.push_back(Key(8));
  335. values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
  336. ASSERT_EQ(values.size(), keys.size());
  337. for (const auto& value : values) {
  338. ASSERT_EQ(1007, value.size());
  339. }
  340. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 3u);
  341. ASSERT_EQ(nvm_sec_cache()->num_misses(), 3u);
  342. ASSERT_EQ(nvm_sec_cache()->num_hits(), 0u);
  343. keys.clear();
  344. values.clear();
  345. keys.push_back(Key(12));
  346. keys.push_back(Key(16));
  347. keys.push_back(Key(20));
  348. values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
  349. ASSERT_EQ(values.size(), keys.size());
  350. for (const auto& value : values) {
  351. ASSERT_EQ(1007, value.size());
  352. }
  353. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
  354. ASSERT_EQ(nvm_sec_cache()->num_misses(), 6u);
  355. ASSERT_EQ(nvm_sec_cache()->num_hits(), 0u);
  356. keys.clear();
  357. values.clear();
  358. keys.push_back(Key(0));
  359. keys.push_back(Key(4));
  360. keys.push_back(Key(8));
  361. values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
  362. ASSERT_EQ(values.size(), keys.size());
  363. for (const auto& value : values) {
  364. ASSERT_EQ(1007, value.size());
  365. }
  366. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
  367. ASSERT_EQ(nvm_sec_cache()->num_misses(), 6u);
  368. ASSERT_EQ(nvm_sec_cache()->num_hits(), 3u);
  369. keys.clear();
  370. values.clear();
  371. keys.push_back(Key(0));
  372. keys.push_back(Key(4));
  373. keys.push_back(Key(8));
  374. values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
  375. ASSERT_EQ(values.size(), keys.size());
  376. for (const auto& value : values) {
  377. ASSERT_EQ(1007, value.size());
  378. }
  379. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
  380. ASSERT_EQ(nvm_sec_cache()->num_misses(), 6u);
  381. ASSERT_EQ(nvm_sec_cache()->num_hits(), 6u);
  382. keys.clear();
  383. values.clear();
  384. keys.push_back(Key(0));
  385. keys.push_back(Key(4));
  386. keys.push_back(Key(8));
  387. values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
  388. ASSERT_EQ(values.size(), keys.size());
  389. for (const auto& value : values) {
  390. ASSERT_EQ(1007, value.size());
  391. }
  392. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
  393. ASSERT_EQ(nvm_sec_cache()->num_misses(), 6u);
  394. ASSERT_EQ(nvm_sec_cache()->num_hits(), 6u);
  395. keys.clear();
  396. values.clear();
  397. keys.push_back(Key(12));
  398. keys.push_back(Key(16));
  399. keys.push_back(Key(20));
  400. values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
  401. ASSERT_EQ(values.size(), keys.size());
  402. for (const auto& value : values) {
  403. ASSERT_EQ(1007, value.size());
  404. }
  405. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
  406. ASSERT_EQ(nvm_sec_cache()->num_misses(), 6u);
  407. ASSERT_EQ(nvm_sec_cache()->num_hits(), 9u);
  408. keys.clear();
  409. values.clear();
  410. keys.push_back(Key(12));
  411. keys.push_back(Key(16));
  412. keys.push_back(Key(20));
  413. values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
  414. ASSERT_EQ(values.size(), keys.size());
  415. for (const auto& value : values) {
  416. ASSERT_EQ(1007, value.size());
  417. }
  418. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
  419. ASSERT_EQ(nvm_sec_cache()->num_misses(), 6u);
  420. ASSERT_EQ(nvm_sec_cache()->num_hits(), 12u);
  421. keys.clear();
  422. values.clear();
  423. keys.push_back(Key(12));
  424. keys.push_back(Key(16));
  425. keys.push_back(Key(20));
  426. values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
  427. ASSERT_EQ(values.size(), keys.size());
  428. for (const auto& value : values) {
  429. ASSERT_EQ(1007, value.size());
  430. }
  431. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
  432. ASSERT_EQ(nvm_sec_cache()->num_misses(), 6u);
  433. ASSERT_EQ(nvm_sec_cache()->num_hits(), 12u);
  434. Destroy(options);
  435. }
  436. TEST_F(DBTieredSecondaryCacheTest, WaitAllTest) {
  437. if (!LZ4_Supported()) {
  438. ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
  439. return;
  440. }
  441. BlockBasedTableOptions table_options;
  442. table_options.block_cache = NewCache(250 * 1024, 20 * 1024, 256 * 1024);
  443. table_options.block_size = 4 * 1024;
  444. table_options.cache_index_and_filter_blocks = false;
  445. Options options = GetDefaultOptions();
  446. options.create_if_missing = true;
  447. options.compression = kLZ4Compression;
  448. options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  449. options.paranoid_file_checks = false;
  450. DestroyAndReopen(options);
  451. Random rnd(301);
  452. const int N = 256;
  453. for (int i = 0; i < N; i++) {
  454. std::string p_v;
  455. test::CompressibleString(&rnd, 0.5, 1007, &p_v);
  456. ASSERT_OK(Put(Key(i), p_v));
  457. }
  458. ASSERT_OK(Flush());
  459. std::vector<std::string> keys;
  460. std::vector<std::string> values;
  461. keys.push_back(Key(0));
  462. keys.push_back(Key(4));
  463. keys.push_back(Key(8));
  464. values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
  465. ASSERT_EQ(values.size(), keys.size());
  466. for (const auto& value : values) {
  467. ASSERT_EQ(1007, value.size());
  468. }
  469. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 3u);
  470. ASSERT_EQ(nvm_sec_cache()->num_misses(), 3u);
  471. ASSERT_EQ(nvm_sec_cache()->num_hits(), 0u);
  472. keys.clear();
  473. values.clear();
  474. keys.push_back(Key(12));
  475. keys.push_back(Key(16));
  476. keys.push_back(Key(20));
  477. values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
  478. ASSERT_EQ(values.size(), keys.size());
  479. for (const auto& value : values) {
  480. ASSERT_EQ(1007, value.size());
  481. }
  482. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
  483. ASSERT_EQ(nvm_sec_cache()->num_misses(), 6u);
  484. ASSERT_EQ(nvm_sec_cache()->num_hits(), 0u);
  485. // Insert placeholders for 4 in primary and compressed
  486. std::string val = Get(Key(4));
  487. // Force placeholder 4 out of primary
  488. keys.clear();
  489. values.clear();
  490. keys.push_back(Key(24));
  491. keys.push_back(Key(28));
  492. keys.push_back(Key(32));
  493. keys.push_back(Key(36));
  494. values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
  495. ASSERT_EQ(values.size(), keys.size());
  496. for (const auto& value : values) {
  497. ASSERT_EQ(1007, value.size());
  498. }
  499. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 10u);
  500. ASSERT_EQ(nvm_sec_cache()->num_misses(), 10u);
  501. ASSERT_EQ(nvm_sec_cache()->num_hits(), 1u);
  502. // Now read 4 again. This will create a placeholder in primary, and insert
  503. // in compressed secondary since it already has a placeholder
  504. val = Get(Key(4));
  505. // Now read 0, 4 and 8. While 4 is already in the compressed secondary
  506. // cache, 0 and 8 will be read asynchronously from the nvm tier. The
  507. // WaitAll will be called for all 3 blocks.
  508. keys.clear();
  509. values.clear();
  510. keys.push_back(Key(0));
  511. keys.push_back(Key(4));
  512. keys.push_back(Key(8));
  513. values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
  514. ASSERT_EQ(values.size(), keys.size());
  515. for (const auto& value : values) {
  516. ASSERT_EQ(1007, value.size());
  517. }
  518. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 10u);
  519. ASSERT_EQ(nvm_sec_cache()->num_misses(), 10u);
  520. ASSERT_EQ(nvm_sec_cache()->num_hits(), 4u);
  521. Destroy(options);
  522. }
  523. TEST_F(DBTieredSecondaryCacheTest, ReadyBeforeWaitAllTest) {
  524. if (!LZ4_Supported()) {
  525. ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
  526. return;
  527. }
  528. BlockBasedTableOptions table_options;
  529. table_options.block_cache = NewCache(250 * 1024, 20 * 1024, 256 * 1024,
  530. TieredAdmissionPolicy::kAdmPolicyAuto,
  531. /*ready_before_wait=*/true);
  532. table_options.block_size = 4 * 1024;
  533. table_options.cache_index_and_filter_blocks = false;
  534. Options options = GetDefaultOptions();
  535. options.create_if_missing = true;
  536. options.compression = kLZ4Compression;
  537. options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  538. options.statistics = CreateDBStatistics();
  539. options.paranoid_file_checks = false;
  540. DestroyAndReopen(options);
  541. Random rnd(301);
  542. const int N = 256;
  543. for (int i = 0; i < N; i++) {
  544. std::string p_v;
  545. test::CompressibleString(&rnd, 0.5, 1007, &p_v);
  546. ASSERT_OK(Put(Key(i), p_v));
  547. }
  548. ASSERT_OK(Flush());
  549. std::vector<std::string> keys;
  550. std::vector<std::string> values;
  551. keys.push_back(Key(0));
  552. keys.push_back(Key(4));
  553. keys.push_back(Key(8));
  554. values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
  555. ASSERT_EQ(values.size(), keys.size());
  556. for (const auto& value : values) {
  557. ASSERT_EQ(1007, value.size());
  558. }
  559. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 3u);
  560. ASSERT_EQ(nvm_sec_cache()->num_misses(), 3u);
  561. ASSERT_EQ(nvm_sec_cache()->num_hits(), 0u);
  562. ASSERT_EQ(options.statistics->getTickerCount(BLOCK_CACHE_MISS), 3u);
  563. keys.clear();
  564. values.clear();
  565. keys.push_back(Key(12));
  566. keys.push_back(Key(16));
  567. keys.push_back(Key(20));
  568. values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
  569. ASSERT_EQ(values.size(), keys.size());
  570. for (const auto& value : values) {
  571. ASSERT_EQ(1007, value.size());
  572. }
  573. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
  574. ASSERT_EQ(nvm_sec_cache()->num_misses(), 6u);
  575. ASSERT_EQ(nvm_sec_cache()->num_hits(), 0u);
  576. ASSERT_EQ(options.statistics->getTickerCount(BLOCK_CACHE_MISS), 6u);
  577. keys.clear();
  578. values.clear();
  579. keys.push_back(Key(0));
  580. keys.push_back(Key(4));
  581. keys.push_back(Key(8));
  582. values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
  583. ASSERT_EQ(values.size(), keys.size());
  584. for (const auto& value : values) {
  585. ASSERT_EQ(1007, value.size());
  586. }
  587. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
  588. ASSERT_EQ(nvm_sec_cache()->num_misses(), 6u);
  589. ASSERT_EQ(nvm_sec_cache()->num_hits(), 3u);
  590. ASSERT_EQ(options.statistics->getTickerCount(BLOCK_CACHE_MISS), 6u);
  591. ClearPrimaryCache();
  592. keys.clear();
  593. values.clear();
  594. keys.push_back(Key(0));
  595. keys.push_back(Key(32));
  596. keys.push_back(Key(36));
  597. values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
  598. ASSERT_EQ(values.size(), keys.size());
  599. for (const auto& value : values) {
  600. ASSERT_EQ(1007, value.size());
  601. }
  602. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 8u);
  603. ASSERT_EQ(nvm_sec_cache()->num_misses(), 8u);
  604. ASSERT_EQ(nvm_sec_cache()->num_hits(), 4u);
  605. ASSERT_EQ(options.statistics->getTickerCount(BLOCK_CACHE_MISS), 8u);
  606. keys.clear();
  607. values.clear();
  608. keys.push_back(Key(0));
  609. keys.push_back(Key(32));
  610. keys.push_back(Key(36));
  611. values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
  612. ASSERT_EQ(values.size(), keys.size());
  613. for (const auto& value : values) {
  614. ASSERT_EQ(1007, value.size());
  615. }
  616. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 8u);
  617. ASSERT_EQ(nvm_sec_cache()->num_misses(), 8u);
  618. ASSERT_EQ(nvm_sec_cache()->num_hits(), 4u);
  619. ASSERT_EQ(options.statistics->getTickerCount(BLOCK_CACHE_MISS), 8u);
  620. Destroy(options);
  621. }
  622. // This test is for iteration. It iterates through a set of keys in two
  623. // passes. First pass loads the compressed blocks into the nvm tier, and
  624. // the second pass should hit all of those blocks.
  625. TEST_F(DBTieredSecondaryCacheTest, IterateTest) {
  626. if (!LZ4_Supported()) {
  627. ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
  628. return;
  629. }
  630. BlockBasedTableOptions table_options;
  631. table_options.block_cache = NewCache(250 * 1024, 10 * 1024, 256 * 1024);
  632. table_options.block_size = 4 * 1024;
  633. table_options.cache_index_and_filter_blocks = false;
  634. Options options = GetDefaultOptions();
  635. options.create_if_missing = true;
  636. options.compression = kLZ4Compression;
  637. options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  638. options.paranoid_file_checks = false;
  639. DestroyAndReopen(options);
  640. Random rnd(301);
  641. const int N = 256;
  642. for (int i = 0; i < N; i++) {
  643. std::string p_v;
  644. test::CompressibleString(&rnd, 0.5, 1007, &p_v);
  645. ASSERT_OK(Put(Key(i), p_v));
  646. }
  647. ASSERT_OK(Flush());
  648. ReadOptions ro;
  649. ro.readahead_size = 256 * 1024;
  650. auto iter = dbfull()->NewIterator(ro);
  651. iter->SeekToFirst();
  652. for (int i = 0; i < 31; ++i) {
  653. ASSERT_EQ(Key(i), iter->key().ToString());
  654. ASSERT_EQ(1007, iter->value().size());
  655. iter->Next();
  656. }
  657. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 8u);
  658. ASSERT_EQ(nvm_sec_cache()->num_misses(), 8u);
  659. ASSERT_EQ(nvm_sec_cache()->num_hits(), 0u);
  660. delete iter;
  661. iter = dbfull()->NewIterator(ro);
  662. iter->SeekToFirst();
  663. for (int i = 0; i < 31; ++i) {
  664. ASSERT_EQ(Key(i), iter->key().ToString());
  665. ASSERT_EQ(1007, iter->value().size());
  666. iter->Next();
  667. }
  668. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 8u);
  669. ASSERT_EQ(nvm_sec_cache()->num_misses(), 8u);
  670. ASSERT_EQ(nvm_sec_cache()->num_hits(), 8u);
  671. delete iter;
  672. Destroy(options);
  673. }
  674. TEST_F(DBTieredSecondaryCacheTest, VolatileTierTest) {
  675. if (!LZ4_Supported()) {
  676. ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
  677. return;
  678. }
  679. BlockBasedTableOptions table_options;
  680. // We want a block cache of size 5KB, and a compressed secondary cache of
  681. // size 5KB. However, we specify a block cache size of 256KB here in order
  682. // to take into account the cache reservation in the block cache on
  683. // behalf of the compressed cache. The unit of cache reservation is 256KB.
  684. // The effective block cache capacity will be calculated as 256 + 5 = 261KB,
  685. // and 256KB will be reserved for the compressed cache, leaving 5KB for
  686. // the primary block cache. We only have to worry about this here because
  687. // the cache size is so small.
  688. table_options.block_cache = NewCache(256 * 1024, 5 * 1024, 256 * 1024);
  689. table_options.block_size = 4 * 1024;
  690. table_options.cache_index_and_filter_blocks = false;
  691. Options options = GetDefaultOptions();
  692. options.create_if_missing = true;
  693. options.compression = kLZ4Compression;
  694. options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  695. // Disable paranoid_file_checks so that flush will not read back the newly
  696. // written file
  697. options.paranoid_file_checks = false;
  698. options.lowest_used_cache_tier = CacheTier::kVolatileTier;
  699. DestroyAndReopen(options);
  700. Random rnd(301);
  701. const int N = 256;
  702. for (int i = 0; i < N; i++) {
  703. std::string p_v;
  704. test::CompressibleString(&rnd, 0.5, 1007, &p_v);
  705. ASSERT_OK(Put(Key(i), p_v));
  706. }
  707. ASSERT_OK(Flush());
  708. // Since lowest_used_cache_tier is the volatile tier, nothing should be
  709. // inserted in the secondary cache.
  710. std::string v = Get(Key(0));
  711. ASSERT_EQ(1007, v.size());
  712. ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 0u);
  713. ASSERT_EQ(nvm_sec_cache()->num_misses(), 0u);
  714. Destroy(options);
  715. }
// Value-parameterized fixture: runs each tiered-cache test once per
// TieredAdmissionPolicy supplied via INSTANTIATE_TEST_CASE_P below,
// reusing all helpers from DBTieredSecondaryCacheTest.
class DBTieredAdmPolicyTest
    : public DBTieredSecondaryCacheTest,
      public testing::WithParamInterface<TieredAdmissionPolicy> {};
  719. TEST_P(DBTieredAdmPolicyTest, CompressedOnlyTest) {
  720. if (!LZ4_Supported()) {
  721. ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
  722. return;
  723. }
  724. BlockBasedTableOptions table_options;
  725. // We want a block cache of size 10KB, and a compressed secondary cache of
  726. // size 10KB. However, we specify a block cache size of 256KB here in order
  727. // to take into account the cache reservation in the block cache on
  728. // behalf of the compressed cache. The unit of cache reservation is 256KB.
  729. // The effective block cache capacity will be calculated as 256 + 10 = 266KB,
  730. // and 256KB will be reserved for the compressed cache, leaving 10KB for
  731. // the primary block cache. We only have to worry about this here because
  732. // the cache size is so small.
  733. table_options.block_cache = NewCache(256 * 1024, 10 * 1024, 0, GetParam());
  734. table_options.block_size = 4 * 1024;
  735. table_options.cache_index_and_filter_blocks = false;
  736. Options options = GetDefaultOptions();
  737. options.create_if_missing = true;
  738. options.compression = kLZ4Compression;
  739. options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  740. size_t comp_cache_usage = compressed_secondary_cache()->TEST_GetUsage();
  741. // Disable paranoid_file_checks so that flush will not read back the newly
  742. // written file
  743. options.paranoid_file_checks = false;
  744. DestroyAndReopen(options);
  745. Random rnd(301);
  746. const int N = 256;
  747. for (int i = 0; i < N; i++) {
  748. std::string p_v;
  749. test::CompressibleString(&rnd, 0.5, 1007, &p_v);
  750. ASSERT_OK(Put(Key(i), p_v));
  751. }
  752. ASSERT_OK(Flush());
  753. // The first 2 Gets, for keys 0 and 5, will load the corresponding data
  754. // blocks as they will be cache misses. Since this is a 2-tier cache (
  755. // primary and compressed), no warm-up should happen with the compressed
  756. // blocks.
  757. std::string v = Get(Key(0));
  758. ASSERT_EQ(1007, v.size());
  759. v = Get(Key(5));
  760. ASSERT_EQ(1007, v.size());
  761. ASSERT_EQ(compressed_secondary_cache()->TEST_GetUsage(), comp_cache_usage);
  762. Destroy(options);
  763. }
// Verify that a data block evicted from the (tiny) primary cache is
// admitted into the compressed secondary cache only under the
// kAdmPolicyAllowAll policy; other policies should reject it.
TEST_P(DBTieredAdmPolicyTest, CompressedCacheAdmission) {
  if (!LZ4_Supported()) {
    ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
    return;
  }
  BlockBasedTableOptions table_options;
  // We want a block cache of size 5KB, and a compressed secondary cache of
  // size 5KB. However, we specify a block cache size of 256KB here in order
  // to take into account the cache reservation in the block cache on
  // behalf of the compressed cache. The unit of cache reservation is 256KB.
  // The effective block cache capacity will be calculated as 256 + 5 = 261KB,
  // and 256KB will be reserved for the compressed cache, leaving 5KB for
  // the primary block cache. We only have to worry about this here because
  // the cache size is so small.
  table_options.block_cache = NewCache(256 * 1024, 5 * 1024, 0, GetParam());
  table_options.block_size = 4 * 1024;
  table_options.cache_index_and_filter_blocks = false;
  Options options = GetDefaultOptions();
  options.create_if_missing = true;
  options.compression = kLZ4Compression;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  // Snapshot the compressed cache usage before any reads happen, so the
  // assertions below can measure whether the evicted block was admitted.
  size_t comp_cache_usage = compressed_secondary_cache()->TEST_GetUsage();
  // Disable paranoid_file_checks so that flush will not read back the newly
  // written file
  options.paranoid_file_checks = false;
  DestroyAndReopen(options);
  Random rnd(301);
  const int N = 256;
  for (int i = 0; i < N; i++) {
    std::string p_v;
    test::CompressibleString(&rnd, 0.5, 1007, &p_v);
    ASSERT_OK(Put(Key(i), p_v));
  }
  ASSERT_OK(Flush());
  // The second Get (for 5) will evict the data block loaded by the first
  // Get, which will be admitted into the compressed secondary cache only
  // for the kAdmPolicyAllowAll policy
  std::string v = Get(Key(0));
  ASSERT_EQ(1007, v.size());
  v = Get(Key(5));
  ASSERT_EQ(1007, v.size());
  if (GetParam() == TieredAdmissionPolicy::kAdmPolicyAllowAll) {
    // AllowAll admits the evicted block: usage must grow noticeably
    // (by more than 128 bytes over the baseline).
    ASSERT_GT(compressed_secondary_cache()->TEST_GetUsage(),
              comp_cache_usage + 128);
  } else {
    // All other policies reject the block, so usage stays (nearly) flat.
    ASSERT_LT(compressed_secondary_cache()->TEST_GetUsage(),
              comp_cache_usage + 128);
  }
  Destroy(options);
}
// Exercise the tiered secondary cache when the FileSystem advertises
// kFSBuffer support, i.e. MultiRead returns data in FS-allocated
// buffers (FSReadRequest::fs_scratch) instead of the caller's scratch.
TEST_F(DBTieredSecondaryCacheTest, FSBufferTest) {
  // FileSystem wrapper whose random-access files fulfill MultiRead by
  // allocating their own buffers and handing ownership back through
  // fs_scratch, mimicking an FS that manages read buffers itself.
  class WrapFS : public FileSystemWrapper {
   public:
    explicit WrapFS(const std::shared_ptr<FileSystem>& _target)
        : FileSystemWrapper(_target) {}
    ~WrapFS() override {}
    const char* Name() const override { return "WrapFS"; }

    IOStatus NewRandomAccessFile(const std::string& fname,
                                 const FileOptions& opts,
                                 std::unique_ptr<FSRandomAccessFile>* result,
                                 IODebugContext* dbg) override {
      class WrappedRandomAccessFile : public FSRandomAccessFileOwnerWrapper {
       public:
        explicit WrappedRandomAccessFile(
            std::unique_ptr<FSRandomAccessFile>& file)
            : FSRandomAccessFileOwnerWrapper(std::move(file)) {}

        IOStatus MultiRead(FSReadRequest* reqs, size_t num_reqs,
                           const IOOptions& options,
                           IODebugContext* dbg) override {
          for (size_t i = 0; i < num_reqs; ++i) {
            FSReadRequest& req = reqs[i];
            // See https://github.com/facebook/rocksdb/pull/13195 for why we
            // want to set up our test implementation for FSAllocationPtr this
            // way.
            char* internalData = new char[req.len];
            req.status = Read(req.offset, req.len, options, &req.result,
                              internalData, dbg);
            // Wrap the raw buffer in a heap-allocated Slice; the custom
            // deleter frees both the character buffer and the Slice when
            // the FSAllocationPtr is released.
            Slice* internalSlice = new Slice(internalData, req.len);
            FSAllocationPtr internalPtr(internalSlice, [](void* ptr) {
              delete[] static_cast<const char*>(
                  static_cast<Slice*>(ptr)->data_);
              delete static_cast<Slice*>(ptr);
            });
            req.fs_scratch = std::move(internalPtr);
          }
          return IOStatus::OK();
        }
      };

      std::unique_ptr<FSRandomAccessFile> file;
      IOStatus s = target()->NewRandomAccessFile(fname, opts, &file, dbg);
      EXPECT_OK(s);
      result->reset(new WrappedRandomAccessFile(file));
      return s;
    }

    void SupportedOps(int64_t& supported_ops) override {
      // Advertise both async IO and FS-allocated buffers so the kFSBuffer
      // read path is taken by the DB.
      supported_ops = 1 << FSSupportedOps::kAsyncIO;
      supported_ops |= 1 << FSSupportedOps::kFSBuffer;
    }
  };

  if (!LZ4_Supported()) {
    ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
    return;
  }

  std::shared_ptr<WrapFS> wrap_fs =
      std::make_shared<WrapFS>(env_->GetFileSystem());
  std::unique_ptr<Env> wrap_env(new CompositeEnvWrapper(env_, wrap_fs));
  BlockBasedTableOptions table_options;
  table_options.block_cache = NewCache(250 * 1024, 20 * 1024, 256 * 1024,
                                       TieredAdmissionPolicy::kAdmPolicyAuto,
                                       /*ready_before_wait=*/true);
  table_options.block_size = 4 * 1024;
  table_options.cache_index_and_filter_blocks = false;

  Options options = GetDefaultOptions();
  options.create_if_missing = true;
  options.compression = kLZ4Compression;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  options.statistics = CreateDBStatistics();
  // Route all file IO through the wrapper defined above.
  options.env = wrap_env.get();
  // Disable paranoid_file_checks so that flush will not read back the
  // newly written file.
  options.paranoid_file_checks = false;
  DestroyAndReopen(options);
  Random rnd(301);
  const int N = 256;
  for (int i = 0; i < N; i++) {
    std::string p_v;
    test::CompressibleString(&rnd, 0.5, 1007, &p_v);
    ASSERT_OK(Put(Key(i), p_v));
  }
  ASSERT_OK(Flush());

  std::vector<std::string> keys;
  std::vector<std::string> values;
  keys.push_back(Key(0));
  keys.push_back(Key(4));
  keys.push_back(Key(8));
  // Async MultiGet drives the fs_scratch path; the 3 data blocks should
  // each miss and be saved into the NVM secondary cache.
  values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
  ASSERT_EQ(values.size(), keys.size());
  for (const auto& value : values) {
    ASSERT_EQ(1007, value.size());
  }
  ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 3u);
  ASSERT_EQ(nvm_sec_cache()->num_misses(), 3u);
  ASSERT_EQ(nvm_sec_cache()->num_hits(), 0u);

  // A synchronous Get of an uncached key adds exactly one more
  // miss/insert, for a total of 4 block cache misses.
  std::string v = Get(Key(12));
  ASSERT_EQ(1007, v.size());
  ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 4u);
  ASSERT_EQ(nvm_sec_cache()->num_misses(), 4u);
  ASSERT_EQ(options.statistics->getTickerCount(BLOCK_CACHE_MISS), 4u);

  // Close the DB before wrap_env/wrap_fs go out of scope at the end of
  // this function, so no file handles outlive the wrapped Env.
  Close();
  Destroy(options);
}
// Instantiate DBTieredAdmPolicyTest for every admission policy exercised
// by the parameterized tests above.
INSTANTIATE_TEST_CASE_P(
    DBTieredAdmPolicyTest, DBTieredAdmPolicyTest,
    ::testing::Values(TieredAdmissionPolicy::kAdmPolicyAuto,
                      TieredAdmissionPolicy::kAdmPolicyPlaceholder,
                      TieredAdmissionPolicy::kAdmPolicyAllowCacheHits,
                      TieredAdmissionPolicy::kAdmPolicyAllowAll));
  919. } // namespace ROCKSDB_NAMESPACE
// Test entry point: install the crash stack-trace handler first, then
// hand control to GoogleTest.
int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}