// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "cache/compressed_secondary_cache.h"

#include <array>
#include <iterator>
#include <memory>
#include <tuple>

#include "cache/secondary_cache_adapter.h"
#include "memory/jemalloc_nodump_allocator.h"
#include "rocksdb/cache.h"
#include "rocksdb/convenience.h"
#include "test_util/secondary_cache_test_util.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/cast_util.h"

namespace ROCKSDB_NAMESPACE {
using secondary_cache_test_util::GetTestingCacheTypes;
using secondary_cache_test_util::WithCacheType;

// Read and reset a statistic
template <typename T>
T Pop(T& var) {
  T ret = var;
  var = T();
  return ret;
}
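// Illustrative usage only (not part of the test fixture): Pop() lets an
// assertion consume a counter exactly once, e.g.
//   uint64_t n = 5;
//   assert(Pop(n) == 5);  // returns the old value...
//   assert(n == 0);       // ...and resets the variable to its default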
// Keys are exactly 16 bytes for HCC (HyperClockCache) compatibility
const std::string key0 = "____    ____key0";
const std::string key1 = "____    ____key1";
const std::string key2 = "____    ____key2";
const std::string key3 = "____    ____key3";

class CompressedSecondaryCacheTestBase : public testing::Test,
                                         public WithCacheType {
 public:
  CompressedSecondaryCacheTestBase() = default;
  ~CompressedSecondaryCacheTestBase() override = default;

 protected:
  void BasicTestHelper(std::shared_ptr<SecondaryCache> sec_cache,
                       bool sec_cache_is_compressed) {
    CompressedSecondaryCache* comp_sec_cache =
        static_cast<CompressedSecondaryCache*>(sec_cache.get());
    get_perf_context()->Reset();
    bool kept_in_sec_cache{true};
    // Lookup a non-existent key.
    std::unique_ptr<SecondaryCacheResultHandle> handle0 =
        sec_cache->Lookup(key0, GetHelper(), this, true, /*advise_erase=*/true,
                          /*stats=*/nullptr, kept_in_sec_cache);
    ASSERT_EQ(handle0, nullptr);
    Random rnd(301);
    // Insert and Lookup the item k1 for the first time.
    std::string str1 = test::CompressibleString(&rnd, 0.5, 1000);
    TestItem item1(str1.data(), str1.length());
    // A dummy handle is inserted if the item is inserted for the first time.
    ASSERT_OK(sec_cache->Insert(key1, &item1, GetHelper(), false));
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_dummy_count, 1);
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_uncompressed_bytes, 0);
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_compressed_bytes, 0);
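    // (Two-touch admission, per the dummy count above: the first Insert of a
    // key only records a dummy entry, and only a repeat Insert stores the
    // real value. Hence the Lookup below still misses.)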
    std::unique_ptr<SecondaryCacheResultHandle> handle1_1 =
        sec_cache->Lookup(key1, GetHelper(), this, true, /*advise_erase=*/false,
                          /*stats=*/nullptr, kept_in_sec_cache);
    ASSERT_EQ(handle1_1, nullptr);
    // Insert and Lookup the item k1 for the second time and advise erasing it.
    ASSERT_OK(sec_cache->Insert(key1, &item1, GetHelper(), false));
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_real_count, 1);
    if (sec_cache_is_compressed) {
      ASSERT_GT(comp_sec_cache->TEST_GetCharge(key1), str1.length() / 4);
      ASSERT_LT(comp_sec_cache->TEST_GetCharge(key1), str1.length() * 3 / 4);
    } else {
      ASSERT_GE(comp_sec_cache->TEST_GetCharge(key1), str1.length());
      // NOTE: split-merge is worse (1048 vs. 1024)
      ASSERT_LE(comp_sec_cache->TEST_GetCharge(key1), 1048U);
    }
    std::unique_ptr<SecondaryCacheResultHandle> handle1_2 =
        sec_cache->Lookup(key1, GetHelper(), this, true, /*advise_erase=*/true,
                          /*stats=*/nullptr, kept_in_sec_cache);
    ASSERT_NE(handle1_2, nullptr);
    ASSERT_FALSE(kept_in_sec_cache);
    if (sec_cache_is_compressed) {
      ASSERT_EQ(
          Pop(get_perf_context()->compressed_sec_cache_uncompressed_bytes),
          str1.length());
      ASSERT_LT(get_perf_context()->compressed_sec_cache_compressed_bytes,
                str1.length() * 3 / 4);
      ASSERT_GT(Pop(get_perf_context()->compressed_sec_cache_compressed_bytes),
                str1.length() / 4);
    } else {
      ASSERT_EQ(get_perf_context()->compressed_sec_cache_uncompressed_bytes, 0);
      ASSERT_EQ(get_perf_context()->compressed_sec_cache_compressed_bytes, 0);
    }
    std::unique_ptr<TestItem> val1 =
        std::unique_ptr<TestItem>(static_cast<TestItem*>(handle1_2->Value()));
    ASSERT_NE(val1, nullptr);
    ASSERT_EQ(memcmp(val1->Buf(), item1.Buf(), item1.Size()), 0);
    // Lookup the item k1 again.
    std::unique_ptr<SecondaryCacheResultHandle> handle1_3 =
        sec_cache->Lookup(key1, GetHelper(), this, true, /*advise_erase=*/true,
                          /*stats=*/nullptr, kept_in_sec_cache);
    ASSERT_EQ(handle1_3, nullptr);
    // Insert and Lookup the item k2.
    std::string str2 = test::CompressibleString(&rnd, 0.5, 1017);
    TestItem item2(str2.data(), str2.length());
    ASSERT_OK(sec_cache->Insert(key2, &item2, GetHelper(), false));
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_dummy_count, 2);
    std::unique_ptr<SecondaryCacheResultHandle> handle2_1 =
        sec_cache->Lookup(key2, GetHelper(), this, true, /*advise_erase=*/false,
                          /*stats=*/nullptr, kept_in_sec_cache);
    ASSERT_EQ(handle2_1, nullptr);
    ASSERT_OK(sec_cache->Insert(key2, &item2, GetHelper(), false));
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_real_count, 2);
    if (sec_cache_is_compressed) {
      ASSERT_EQ(
          Pop(get_perf_context()->compressed_sec_cache_uncompressed_bytes),
          str2.length());
      ASSERT_LT(get_perf_context()->compressed_sec_cache_compressed_bytes,
                str2.length() * 3 / 4);
      ASSERT_GT(Pop(get_perf_context()->compressed_sec_cache_compressed_bytes),
                str2.length() / 4);
    } else {
      ASSERT_EQ(get_perf_context()->compressed_sec_cache_uncompressed_bytes, 0);
      ASSERT_EQ(get_perf_context()->compressed_sec_cache_compressed_bytes, 0);
    }
    std::unique_ptr<SecondaryCacheResultHandle> handle2_2 =
        sec_cache->Lookup(key2, GetHelper(), this, true, /*advise_erase=*/false,
                          /*stats=*/nullptr, kept_in_sec_cache);
    ASSERT_NE(handle2_2, nullptr);
    std::unique_ptr<TestItem> val2 =
        std::unique_ptr<TestItem>(static_cast<TestItem*>(handle2_2->Value()));
    ASSERT_NE(val2, nullptr);
    ASSERT_EQ(memcmp(val2->Buf(), item2.Buf(), item2.Size()), 0);
    // Release handles
    std::vector<SecondaryCacheResultHandle*> handles = {handle1_2.get(),
                                                        handle2_2.get()};
    sec_cache->WaitAll(handles);
    handle1_2.reset();
    handle2_2.reset();
    // Insert and Lookup a non-compressible item k3.
    std::string str3 = rnd.RandomBinaryString(480);
    TestItem item3(str3.data(), str3.length());
    ASSERT_OK(sec_cache->Insert(key3, &item3, GetHelper(), false));
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_dummy_count, 3);
    std::unique_ptr<SecondaryCacheResultHandle> handle3_1 =
        sec_cache->Lookup(key3, GetHelper(), this, true, /*advise_erase=*/false,
                          /*stats=*/nullptr, kept_in_sec_cache);
    ASSERT_EQ(handle3_1, nullptr);
    ASSERT_OK(sec_cache->Insert(key3, &item3, GetHelper(), false));
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_real_count, 3);
    if (sec_cache_is_compressed) {
      // TODO: consider a compression rejected stat?
      ASSERT_EQ(
          Pop(get_perf_context()->compressed_sec_cache_uncompressed_bytes),
          str3.length());
      ASSERT_EQ(Pop(get_perf_context()->compressed_sec_cache_compressed_bytes),
                str3.length());
    } else {
      ASSERT_EQ(get_perf_context()->compressed_sec_cache_uncompressed_bytes, 0);
      ASSERT_EQ(get_perf_context()->compressed_sec_cache_compressed_bytes, 0);
    }
    std::unique_ptr<SecondaryCacheResultHandle> handle3_2 =
        sec_cache->Lookup(key3, GetHelper(), this, true, /*advise_erase=*/false,
                          /*stats=*/nullptr, kept_in_sec_cache);
    ASSERT_NE(handle3_2, nullptr);
    std::unique_ptr<TestItem> val3 =
        std::unique_ptr<TestItem>(static_cast<TestItem*>(handle3_2->Value()));
    ASSERT_NE(val3, nullptr);
    ASSERT_EQ(memcmp(val3->Buf(), item3.Buf(), item3.Size()), 0);
    EXPECT_GE(comp_sec_cache->TEST_GetCharge(key3), str3.length());
    EXPECT_LE(comp_sec_cache->TEST_GetCharge(key3), 512);
    sec_cache.reset();
  }

  void BasicTest(bool sec_cache_is_compressed, bool use_jemalloc) {
    CompressedSecondaryCacheOptions opts;
    opts.capacity = 2048;
    opts.num_shard_bits = 0;
    if (sec_cache_is_compressed) {
      if (!LZ4_Supported()) {
        ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
        opts.compression_type = CompressionType::kNoCompression;
        sec_cache_is_compressed = false;
      }
    } else {
      opts.compression_type = CompressionType::kNoCompression;
    }
    if (use_jemalloc) {
      JemallocAllocatorOptions jopts;
      std::shared_ptr<MemoryAllocator> allocator;
      std::string msg;
      if (JemallocNodumpAllocator::IsSupported(&msg)) {
        Status s = NewJemallocNodumpAllocator(jopts, &allocator);
        if (s.ok()) {
          opts.memory_allocator = allocator;
        }
      } else {
        ROCKSDB_GTEST_BYPASS("JEMALLOC not supported");
      }
    }
    std::shared_ptr<SecondaryCache> sec_cache =
        NewCompressedSecondaryCache(opts);
    BasicTestHelper(sec_cache, sec_cache_is_compressed);
  }

  void FailsTest(bool sec_cache_is_compressed) {
    CompressedSecondaryCacheOptions secondary_cache_opts;
    if (sec_cache_is_compressed) {
      if (!LZ4_Supported()) {
        ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
        secondary_cache_opts.compression_type = CompressionType::kNoCompression;
      }
    } else {
      secondary_cache_opts.compression_type = CompressionType::kNoCompression;
    }
    secondary_cache_opts.capacity = 1400;
    secondary_cache_opts.num_shard_bits = 0;
    secondary_cache_opts.strict_capacity_limit = true;
    std::shared_ptr<SecondaryCache> sec_cache =
        NewCompressedSecondaryCache(secondary_cache_opts);
    // Insert and Lookup the first item.
    Random rnd(301);
    std::string str1(rnd.RandomString(1000));
    TestItem item1(str1.data(), str1.length());
    // Insert a dummy handle.
    ASSERT_OK(sec_cache->Insert(key1, &item1, GetHelper(), false));
    // Insert k1.
    ASSERT_OK(sec_cache->Insert(key1, &item1, GetHelper(), false));
    // Insert and Lookup the second item.
    std::string str2(rnd.RandomString(500));
    TestItem item2(str2.data(), str2.length());
    // Insert a dummy handle; k1 is not evicted.
    ASSERT_OK(sec_cache->Insert(key2, &item2, GetHelper(), false));
    bool kept_in_sec_cache{false};
    std::unique_ptr<SecondaryCacheResultHandle> handle1 =
        sec_cache->Lookup(key1, GetHelper(), this, true, /*advise_erase=*/false,
                          /*stats=*/nullptr, kept_in_sec_cache);
    ASSERT_NE(handle1, nullptr);
    std::unique_ptr<TestItem> val1{static_cast<TestItem*>(handle1->Value())};
    ASSERT_NE(val1, nullptr);
    ASSERT_EQ(val1->ToString(), str1);
    handle1.reset();
    // Insert k2; k1 is evicted.
    ASSERT_OK(sec_cache->Insert(key2, &item2, GetHelper(), false));
    handle1 =
        sec_cache->Lookup(key1, GetHelper(), this, true, /*advise_erase=*/false,
                          /*stats=*/nullptr, kept_in_sec_cache);
    ASSERT_EQ(handle1, nullptr);
    std::unique_ptr<SecondaryCacheResultHandle> handle2 =
        sec_cache->Lookup(key2, GetHelper(), this, true, /*advise_erase=*/false,
                          /*stats=*/nullptr, kept_in_sec_cache);
    ASSERT_NE(handle2, nullptr);
    std::unique_ptr<TestItem> val2{static_cast<TestItem*>(handle2->Value())};
    ASSERT_NE(val2, nullptr);
    ASSERT_EQ(memcmp(val2->Buf(), item2.Buf(), item2.Size()), 0);
    // Insert k1 again; a dummy handle is inserted.
    ASSERT_OK(sec_cache->Insert(key1, &item1, GetHelper(), false));
    std::unique_ptr<SecondaryCacheResultHandle> handle1_1 =
        sec_cache->Lookup(key1, GetHelper(), this, true, /*advise_erase=*/false,
                          /*stats=*/nullptr, kept_in_sec_cache);
    ASSERT_EQ(handle1_1, nullptr);
    // Create fails.
    SetFailCreate(true);
    std::unique_ptr<SecondaryCacheResultHandle> handle2_1 =
        sec_cache->Lookup(key2, GetHelper(), this, true, /*advise_erase=*/true,
                          /*stats=*/nullptr, kept_in_sec_cache);
    ASSERT_EQ(handle2_1, nullptr);
    // Save fails.
    std::string str3 = rnd.RandomString(10);
    TestItem item3(str3.data(), str3.length());
    // The first Status is OK because a dummy handle is inserted.
    ASSERT_OK(sec_cache->Insert(key3, &item3, GetHelperFail(), false));
    ASSERT_NOK(sec_cache->Insert(key3, &item3, GetHelperFail(), false));
    sec_cache.reset();
  }

  void BasicIntegrationTest(bool sec_cache_is_compressed,
                            bool enable_custom_split_merge) {
    CompressedSecondaryCacheOptions secondary_cache_opts;
    if (sec_cache_is_compressed) {
      if (!LZ4_Supported()) {
        ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
        secondary_cache_opts.compression_type = CompressionType::kNoCompression;
        sec_cache_is_compressed = false;
      }
    } else {
      secondary_cache_opts.compression_type = CompressionType::kNoCompression;
    }
    secondary_cache_opts.capacity = 6000;
    secondary_cache_opts.num_shard_bits = 0;
    secondary_cache_opts.enable_custom_split_merge = enable_custom_split_merge;
    std::shared_ptr<SecondaryCache> secondary_cache =
        NewCompressedSecondaryCache(secondary_cache_opts);
    std::shared_ptr<Cache> cache = NewCache(
        /*_capacity=*/1300, /*_num_shard_bits=*/0,
        /*_strict_capacity_limit=*/true, secondary_cache);
    std::shared_ptr<Statistics> stats = CreateDBStatistics();
    get_perf_context()->Reset();
    Random rnd(301);
    std::string str1 = test::CompressibleString(&rnd, 0.5, 1001);
    auto item1_1 = new TestItem(str1.data(), str1.length());
    ASSERT_OK(cache->Insert(key1, item1_1, GetHelper(), str1.length()));
    std::string str2 = test::CompressibleString(&rnd, 0.5, 1012);
    auto item2_1 = new TestItem(str2.data(), str2.length());
    // After this Insert, the primary cache contains k2 and the secondary
    // cache contains k1's dummy item.
    ASSERT_OK(cache->Insert(key2, item2_1, GetHelper(), str2.length()));
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_dummy_count, 1);
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_uncompressed_bytes, 0);
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_compressed_bytes, 0);
    std::string str3 = test::CompressibleString(&rnd, 0.5, 1024);
    auto item3_1 = new TestItem(str3.data(), str3.length());
    // After this Insert, the primary cache contains k3 and the secondary
    // cache contains k1's dummy item and k2's dummy item.
    ASSERT_OK(cache->Insert(key3, item3_1, GetHelper(), str3.length()));
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_dummy_count, 2);
    // After this Insert, the primary cache contains k1 and the secondary
    // cache contains k1's dummy item, k2's dummy item, and k3's dummy item.
    auto item1_2 = new TestItem(str1.data(), str1.length());
    ASSERT_OK(cache->Insert(key1, item1_2, GetHelper(), str1.length()));
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_dummy_count, 3);
    // After this Insert, the primary cache contains k2 and the secondary
    // cache contains k1's item, k2's dummy item, and k3's dummy item.
    auto item2_2 = new TestItem(str2.data(), str2.length());
    ASSERT_OK(cache->Insert(key2, item2_2, GetHelper(), str2.length()));
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_real_count, 1);
    if (sec_cache_is_compressed) {
      ASSERT_EQ(
          Pop(get_perf_context()->compressed_sec_cache_uncompressed_bytes),
          str1.length());
      ASSERT_LT(get_perf_context()->compressed_sec_cache_compressed_bytes,
                str1.length());
      ASSERT_GT(Pop(get_perf_context()->compressed_sec_cache_compressed_bytes),
                str1.length() / 10);
    } else {
      ASSERT_EQ(get_perf_context()->compressed_sec_cache_uncompressed_bytes, 0);
      ASSERT_EQ(get_perf_context()->compressed_sec_cache_compressed_bytes, 0);
    }
    // After this Insert, the primary cache contains k3 and the secondary
    // cache contains k1's item and k2's item.
    auto item3_2 = new TestItem(str3.data(), str3.length());
    ASSERT_OK(cache->Insert(key3, item3_2, GetHelper(), str3.length()));
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_real_count, 2);
    if (sec_cache_is_compressed) {
      ASSERT_EQ(
          Pop(get_perf_context()->compressed_sec_cache_uncompressed_bytes),
          str2.length());
      ASSERT_LT(get_perf_context()->compressed_sec_cache_compressed_bytes,
                str2.length());
      ASSERT_GT(Pop(get_perf_context()->compressed_sec_cache_compressed_bytes),
                str2.length() / 10);
    } else {
      ASSERT_EQ(get_perf_context()->compressed_sec_cache_uncompressed_bytes, 0);
      ASSERT_EQ(get_perf_context()->compressed_sec_cache_compressed_bytes, 0);
    }
    Cache::Handle* handle;
    handle = cache->Lookup(key3, GetHelper(), this, Cache::Priority::LOW,
                           stats.get());
    ASSERT_NE(handle, nullptr);
    auto val3 = static_cast<TestItem*>(cache->Value(handle));
    ASSERT_NE(val3, nullptr);
    ASSERT_EQ(memcmp(val3->Buf(), item3_2->Buf(), item3_2->Size()), 0);
    cache->Release(handle);
    // Lookup a non-existent key.
    handle = cache->Lookup(key0, GetHelper(), this, Cache::Priority::LOW,
                           stats.get());
    ASSERT_EQ(handle, nullptr);
    // This Lookup should just insert a dummy handle in the primary cache;
    // k1 is still in the secondary cache.
    handle = cache->Lookup(key1, GetHelper(), this, Cache::Priority::LOW,
                           stats.get());
    ASSERT_NE(handle, nullptr);
    ASSERT_EQ(get_perf_context()->block_cache_standalone_handle_count, 1);
    auto val1_1 = static_cast<TestItem*>(cache->Value(handle));
    ASSERT_NE(val1_1, nullptr);
    ASSERT_EQ(memcmp(val1_1->Buf(), str1.data(), str1.size()), 0);
    cache->Release(handle);
    // This Lookup should erase k1 from the secondary cache and insert it
    // into the primary cache; then k3 is demoted, so k2 and k3 are in the
    // secondary cache.
    handle = cache->Lookup(key1, GetHelper(), this, Cache::Priority::LOW,
                           stats.get());
    ASSERT_NE(handle, nullptr);
    ASSERT_EQ(get_perf_context()->block_cache_standalone_handle_count, 1);
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_real_count, 3);
    cache->Release(handle);
    // k2 is still in the secondary cache.
    handle = cache->Lookup(key2, GetHelper(), this, Cache::Priority::LOW,
                           stats.get());
    ASSERT_NE(handle, nullptr);
    ASSERT_EQ(get_perf_context()->block_cache_standalone_handle_count, 2);
    cache->Release(handle);
    // Testing SetCapacity().
    ASSERT_OK(secondary_cache->SetCapacity(0));
    handle = cache->Lookup(key3, GetHelper(), this, Cache::Priority::LOW,
                           stats.get());
    ASSERT_EQ(handle, nullptr);
    ASSERT_OK(secondary_cache->SetCapacity(7000));
    size_t capacity;
    ASSERT_OK(secondary_cache->GetCapacity(capacity));
    ASSERT_EQ(capacity, 7000);
    auto item1_3 = new TestItem(str1.data(), str1.length());
    // After this Insert, the primary cache contains k1.
    ASSERT_OK(cache->Insert(key1, item1_3, GetHelper(), str2.length()));
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_dummy_count, 3);
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_real_count, 4);
    auto item2_3 = new TestItem(str2.data(), str2.length());
    // After this Insert, the primary cache contains k2 and the secondary
    // cache contains k1's dummy item.
    ASSERT_OK(cache->Insert(key2, item2_3, GetHelper(), str1.length()));
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_dummy_count, 4);
    auto item1_4 = new TestItem(str1.data(), str1.length());
    // After this Insert, the primary cache contains k1 and the secondary
    // cache contains k1's dummy item and k2's dummy item.
    ASSERT_OK(cache->Insert(key1, item1_4, GetHelper(), str2.length()));
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_dummy_count, 5);
    auto item2_4 = new TestItem(str2.data(), str2.length());
    // After this Insert, the primary cache contains k2 and the secondary
    // cache contains k1's real item and k2's dummy item.
    ASSERT_OK(cache->Insert(key2, item2_4, GetHelper(), str2.length()));
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_real_count, 5);
    // This Lookup should just insert a dummy handle in the primary cache;
    // k1 is still in the secondary cache.
    handle = cache->Lookup(key1, GetHelper(), this, Cache::Priority::LOW,
                           stats.get());
    ASSERT_NE(handle, nullptr);
    cache->Release(handle);
    ASSERT_EQ(get_perf_context()->block_cache_standalone_handle_count, 3);
    cache.reset();
    secondary_cache.reset();
  }

  void BasicIntegrationFailTest(bool sec_cache_is_compressed) {
    CompressedSecondaryCacheOptions secondary_cache_opts;
    if (sec_cache_is_compressed) {
      if (!LZ4_Supported()) {
        ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
        secondary_cache_opts.compression_type = CompressionType::kNoCompression;
      }
    } else {
      secondary_cache_opts.compression_type = CompressionType::kNoCompression;
    }
    secondary_cache_opts.capacity = 6000;
    secondary_cache_opts.num_shard_bits = 0;
    std::shared_ptr<SecondaryCache> secondary_cache =
        NewCompressedSecondaryCache(secondary_cache_opts);
    std::shared_ptr<Cache> cache = NewCache(
        /*_capacity=*/1300, /*_num_shard_bits=*/0,
        /*_strict_capacity_limit=*/false, secondary_cache);
    Random rnd(301);
    std::string str1 = rnd.RandomString(1001);
    auto item1 = std::make_unique<TestItem>(str1.data(), str1.length());
    ASSERT_OK(cache->Insert(key1, item1.get(), GetHelper(), str1.length()));
    item1.release();  // Appease clang-analyze "potential memory leak"
    Cache::Handle* handle;
    handle = cache->Lookup(key2, nullptr, this, Cache::Priority::LOW);
    ASSERT_EQ(handle, nullptr);
    handle = cache->Lookup(key2, GetHelper(), this, Cache::Priority::LOW);
    ASSERT_EQ(handle, nullptr);
    Cache::AsyncLookupHandle ah;
    ah.key = key2;
    ah.helper = GetHelper();
    ah.create_context = this;
    ah.priority = Cache::Priority::LOW;
    cache->StartAsyncLookup(ah);
    cache->Wait(ah);
    ASSERT_EQ(ah.Result(), nullptr);
    cache.reset();
    secondary_cache.reset();
  }

  void IntegrationSaveFailTest(bool sec_cache_is_compressed) {
    CompressedSecondaryCacheOptions secondary_cache_opts;
    if (sec_cache_is_compressed) {
      if (!LZ4_Supported()) {
        ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
        secondary_cache_opts.compression_type = CompressionType::kNoCompression;
      }
    } else {
      secondary_cache_opts.compression_type = CompressionType::kNoCompression;
    }
    secondary_cache_opts.capacity = 6000;
    secondary_cache_opts.num_shard_bits = 0;
    std::shared_ptr<SecondaryCache> secondary_cache =
        NewCompressedSecondaryCache(secondary_cache_opts);
    std::shared_ptr<Cache> cache = NewCache(
        /*_capacity=*/1300, /*_num_shard_bits=*/0,
        /*_strict_capacity_limit=*/true, secondary_cache);
    Random rnd(301);
    std::string str1 = rnd.RandomString(1001);
    auto item1 = new TestItem(str1.data(), str1.length());
    ASSERT_OK(cache->Insert(key1, item1, GetHelperFail(), str1.length()));
    std::string str2 = rnd.RandomString(1002);
    auto item2 = new TestItem(str2.data(), str2.length());
    // k1 should be demoted to the secondary cache.
    ASSERT_OK(cache->Insert(key2, item2, GetHelperFail(), str2.length()));
    Cache::Handle* handle;
    handle = cache->Lookup(key2, GetHelperFail(), this, Cache::Priority::LOW);
    ASSERT_NE(handle, nullptr);
    cache->Release(handle);
    // This lookup should fail, since k1 demotion would have failed.
    handle = cache->Lookup(key1, GetHelperFail(), this, Cache::Priority::LOW);
    ASSERT_EQ(handle, nullptr);
    // Since k1 was not promoted, k2 should still be in cache.
    handle = cache->Lookup(key2, GetHelperFail(), this, Cache::Priority::LOW);
    ASSERT_NE(handle, nullptr);
    cache->Release(handle);
    cache.reset();
    secondary_cache.reset();
  }

  void IntegrationCreateFailTest(bool sec_cache_is_compressed) {
    CompressedSecondaryCacheOptions secondary_cache_opts;
    if (sec_cache_is_compressed) {
      if (!LZ4_Supported()) {
        ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
        secondary_cache_opts.compression_type = CompressionType::kNoCompression;
      }
    } else {
      secondary_cache_opts.compression_type = CompressionType::kNoCompression;
    }
    secondary_cache_opts.capacity = 6000;
    secondary_cache_opts.num_shard_bits = 0;
    std::shared_ptr<SecondaryCache> secondary_cache =
        NewCompressedSecondaryCache(secondary_cache_opts);
    std::shared_ptr<Cache> cache = NewCache(
        /*_capacity=*/1300, /*_num_shard_bits=*/0,
        /*_strict_capacity_limit=*/true, secondary_cache);
    Random rnd(301);
    std::string str1 = rnd.RandomString(1001);
    auto item1 = new TestItem(str1.data(), str1.length());
    ASSERT_OK(cache->Insert(key1, item1, GetHelper(), str1.length()));
    std::string str2 = rnd.RandomString(1002);
    auto item2 = new TestItem(str2.data(), str2.length());
    // k1 should be demoted to the secondary cache.
    ASSERT_OK(cache->Insert(key2, item2, GetHelper(), str2.length()));
    Cache::Handle* handle;
    SetFailCreate(true);
    handle = cache->Lookup(key2, GetHelper(), this, Cache::Priority::LOW);
    ASSERT_NE(handle, nullptr);
    cache->Release(handle);
    // This lookup should fail, since k1 creation would have failed.
    handle = cache->Lookup(key1, GetHelper(), this, Cache::Priority::LOW);
    ASSERT_EQ(handle, nullptr);
    // Since k1 didn't get promoted, k2 should still be in cache.
    handle = cache->Lookup(key2, GetHelper(), this, Cache::Priority::LOW);
    ASSERT_NE(handle, nullptr);
    cache->Release(handle);
    cache.reset();
    secondary_cache.reset();
  }

  void IntegrationFullCapacityTest(bool sec_cache_is_compressed) {
    CompressedSecondaryCacheOptions secondary_cache_opts;
    if (sec_cache_is_compressed) {
      if (!LZ4_Supported()) {
        ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
        secondary_cache_opts.compression_type = CompressionType::kNoCompression;
      }
    } else {
      secondary_cache_opts.compression_type = CompressionType::kNoCompression;
    }
    secondary_cache_opts.capacity = 6000;
    secondary_cache_opts.num_shard_bits = 0;
    std::shared_ptr<SecondaryCache> secondary_cache =
        NewCompressedSecondaryCache(secondary_cache_opts);
    std::shared_ptr<Cache> cache = NewCache(
        /*_capacity=*/1300, /*_num_shard_bits=*/0,
        /*_strict_capacity_limit=*/false, secondary_cache);
    Random rnd(301);
    std::string str1 = rnd.RandomString(1001);
    auto item1_1 = new TestItem(str1.data(), str1.length());
    ASSERT_OK(cache->Insert(key1, item1_1, GetHelper(), str1.length()));
    std::string str2 = rnd.RandomString(1002);
    std::string str2_clone{str2};
    auto item2 = new TestItem(str2.data(), str2.length());
    // After this Insert, the primary cache contains k2 and the secondary
    // cache contains k1's dummy item.
    ASSERT_OK(cache->Insert(key2, item2, GetHelper(), str2.length()));
    // After this Insert, the primary cache contains k1 and the secondary
    // cache contains k1's dummy item and k2's dummy item.
    auto item1_2 = new TestItem(str1.data(), str1.length());
    ASSERT_OK(cache->Insert(key1, item1_2, GetHelper(), str1.length()));
    auto item2_2 = new TestItem(str2.data(), str2.length());
    // After this Insert, the primary cache contains k2 and the secondary
    // cache contains k1's item and k2's dummy item.
    ASSERT_OK(cache->Insert(key2, item2_2, GetHelper(), str2.length()));
    Cache::Handle* handle2;
    handle2 = cache->Lookup(key2, GetHelper(), this, Cache::Priority::LOW);
    ASSERT_NE(handle2, nullptr);
    cache->Release(handle2);
    // k1's promotion should fail because the cache is at full capacity, but
    // the lookup should still succeed; a dummy item for k1 is inserted into
    // the primary cache.
    Cache::Handle* handle1;
    handle1 = cache->Lookup(key1, GetHelper(), this, Cache::Priority::LOW);
    ASSERT_NE(handle1, nullptr);
    cache->Release(handle1);
    // Since k1 didn't get inserted, k2 should still be in cache.
    handle2 = cache->Lookup(key2, GetHelper(), this, Cache::Priority::LOW);
    ASSERT_NE(handle2, nullptr);
    cache->Release(handle2);
    cache.reset();
    secondary_cache.reset();
  }

  void SplitValueIntoChunksTest() {
    JemallocAllocatorOptions jopts;
    std::shared_ptr<MemoryAllocator> allocator;
    std::string msg;
    if (JemallocNodumpAllocator::IsSupported(&msg)) {
      Status s = NewJemallocNodumpAllocator(jopts, &allocator);
      if (!s.ok()) {
        ROCKSDB_GTEST_BYPASS("JEMALLOC not supported");
      }
    } else {
      ROCKSDB_GTEST_BYPASS("JEMALLOC not supported");
    }
    using CacheValueChunk = CompressedSecondaryCache::CacheValueChunk;
    std::unique_ptr<CompressedSecondaryCache> sec_cache =
        std::make_unique<CompressedSecondaryCache>(
            CompressedSecondaryCacheOptions(1000, 0, true, 0.5, 0.0,
                                            allocator));
    Random rnd(301);
    // 8500 = 8169 + 233 + 98, so there should be 3 chunks after split.
    size_t str_size{8500};
    std::string str = rnd.RandomString(static_cast<int>(str_size));
    size_t charge{0};
    CacheValueChunk* chunks_head = sec_cache->SplitValueIntoChunks(str, charge);
    ASSERT_EQ(charge, str_size + 3 * (sizeof(CacheValueChunk) - 1));
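    // The expected sizes here follow from chunks being carved to fit
    // allocator size classes (8192 and 256 bytes here), where the
    // CacheValueChunk header overlaps the first data byte. Assuming a 24-byte
    // CacheValueChunk on a typical 64-bit build: 8192 - 24 + 1 = 8169 and
    // 256 - 24 + 1 = 233, leaving 8500 - 8169 - 233 = 98 bytes for the tail
    // chunk, and charge = 8500 + 3 * (24 - 1) = 8569.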
    CacheValueChunk* current_chunk = chunks_head;
    ASSERT_EQ(current_chunk->size, 8192 - sizeof(CacheValueChunk) + 1);
    current_chunk = current_chunk->next;
    ASSERT_EQ(current_chunk->size, 256 - sizeof(CacheValueChunk) + 1);
    current_chunk = current_chunk->next;
    ASSERT_EQ(current_chunk->size, 98);
    sec_cache->GetHelper(true)->del_cb(chunks_head, /*alloc*/ nullptr);
  }

  void MergeChunksIntoValueTest() {
    using CacheValueChunk = CompressedSecondaryCache::CacheValueChunk;
    Random rnd(301);
    size_t size1{2048};
    std::string str1 = rnd.RandomString(static_cast<int>(size1));
    CacheValueChunk* current_chunk = reinterpret_cast<CacheValueChunk*>(
        new char[sizeof(CacheValueChunk) - 1 + size1]);
    CacheValueChunk* chunks_head = current_chunk;
    memcpy(current_chunk->data, str1.data(), size1);
    current_chunk->size = size1;
    size_t size2{256};
    std::string str2 = rnd.RandomString(static_cast<int>(size2));
    current_chunk->next = reinterpret_cast<CacheValueChunk*>(
        new char[sizeof(CacheValueChunk) - 1 + size2]);
    current_chunk = current_chunk->next;
    memcpy(current_chunk->data, str2.data(), size2);
    current_chunk->size = size2;
    size_t size3{31};
    std::string str3 = rnd.RandomString(static_cast<int>(size3));
    current_chunk->next = reinterpret_cast<CacheValueChunk*>(
        new char[sizeof(CacheValueChunk) - 1 + size3]);
    current_chunk = current_chunk->next;
    memcpy(current_chunk->data, str3.data(), size3);
    current_chunk->size = size3;
    current_chunk->next = nullptr;
    std::string str = str1 + str2 + str3;
    std::unique_ptr<CompressedSecondaryCache> sec_cache =
        std::make_unique<CompressedSecondaryCache>(
            CompressedSecondaryCacheOptions(1000, 0, true, 0.5, 0.0));
    std::string value_str = sec_cache->MergeChunksIntoValue(chunks_head);
    ASSERT_EQ(value_str.size(), size1 + size2 + size3);
    ASSERT_EQ(value_str, str);
    while (chunks_head != nullptr) {
      CacheValueChunk* tmp_chunk = chunks_head;
      chunks_head = chunks_head->next;
      tmp_chunk->Free();
    }
  }

  void SplitValueAndMergeChunksTest() {
    JemallocAllocatorOptions jopts;
    std::shared_ptr<MemoryAllocator> allocator;
    std::string msg;
    if (JemallocNodumpAllocator::IsSupported(&msg)) {
      Status s = NewJemallocNodumpAllocator(jopts, &allocator);
      if (!s.ok()) {
        ROCKSDB_GTEST_BYPASS("JEMALLOC not supported");
      }
    } else {
      ROCKSDB_GTEST_BYPASS("JEMALLOC not supported");
    }
    using CacheValueChunk = CompressedSecondaryCache::CacheValueChunk;
    std::unique_ptr<CompressedSecondaryCache> sec_cache =
        std::make_unique<CompressedSecondaryCache>(
            CompressedSecondaryCacheOptions(1000, 0, true, 0.5, 0.0,
                                            allocator));
    Random rnd(301);
    // 8500 = 8169 + 233 + 98, so there should be 3 chunks after split.
    size_t str_size{8500};
    std::string str = rnd.RandomString(static_cast<int>(str_size));
    size_t charge{0};
    CacheValueChunk* chunks_head = sec_cache->SplitValueIntoChunks(str, charge);
    ASSERT_EQ(charge, str_size + 3 * (sizeof(CacheValueChunk) - 1));
    std::string value_str = sec_cache->MergeChunksIntoValue(chunks_head);
    ASSERT_EQ(value_str.size(), str_size);
    ASSERT_EQ(value_str, str);
    sec_cache->GetHelper(true)->del_cb(chunks_head, /*alloc*/ nullptr);
  }
};

class CompressedSecondaryCacheTest
    : public CompressedSecondaryCacheTestBase,
      public testing::WithParamInterface<std::string> {
  const std::string& Type() const override { return GetParam(); }
};

INSTANTIATE_TEST_CASE_P(CompressedSecondaryCacheTest,
                        CompressedSecondaryCacheTest, GetTestingCacheTypes());

class CompressedSecCacheTestWithCompressAndAllocatorParam
    : public CompressedSecondaryCacheTestBase,
      public ::testing::WithParamInterface<
          std::tuple<bool, bool, std::string>> {
 public:
  CompressedSecCacheTestWithCompressAndAllocatorParam() {
    sec_cache_is_compressed_ = std::get<0>(GetParam());
    use_jemalloc_ = std::get<1>(GetParam());
  }
  const std::string& Type() const override { return std::get<2>(GetParam()); }
  bool sec_cache_is_compressed_;
  bool use_jemalloc_;
};

TEST_P(CompressedSecCacheTestWithCompressAndAllocatorParam, BasicTest) {
  BasicTest(sec_cache_is_compressed_, use_jemalloc_);
}

INSTANTIATE_TEST_CASE_P(CompressedSecCacheTests,
                        CompressedSecCacheTestWithCompressAndAllocatorParam,
                        ::testing::Combine(testing::Bool(), testing::Bool(),
                                           GetTestingCacheTypes()));

class CompressedSecondaryCacheTestWithCompressionParam
    : public CompressedSecondaryCacheTestBase,
      public ::testing::WithParamInterface<std::tuple<bool, std::string>> {
 public:
  CompressedSecondaryCacheTestWithCompressionParam() {
    sec_cache_is_compressed_ = std::get<0>(GetParam());
  }
  const std::string& Type() const override { return std::get<1>(GetParam()); }
  bool sec_cache_is_compressed_;
};

TEST_P(CompressedSecondaryCacheTestWithCompressionParam, BasicTestFromString) {
  std::shared_ptr<SecondaryCache> sec_cache{nullptr};
  std::string sec_cache_uri;
  if (sec_cache_is_compressed_) {
    if (LZ4_Supported()) {
      sec_cache_uri =
          "compressed_secondary_cache://"
          "capacity=2048;num_shard_bits=0;compression_type=kLZ4Compression;"
          "compress_format_version=2";
    } else {
      ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
      sec_cache_uri =
          "compressed_secondary_cache://"
          "capacity=2048;num_shard_bits=0;compression_type=kNoCompression";
      sec_cache_is_compressed_ = false;
    }
    Status s = SecondaryCache::CreateFromString(ConfigOptions(), sec_cache_uri,
                                                &sec_cache);
    EXPECT_OK(s);
  } else {
    sec_cache_uri =
        "compressed_secondary_cache://"
        "capacity=2048;num_shard_bits=0;compression_type=kNoCompression";
    Status s = SecondaryCache::CreateFromString(ConfigOptions(), sec_cache_uri,
                                                &sec_cache);
    EXPECT_OK(s);
  }
  BasicTestHelper(sec_cache, sec_cache_is_compressed_);
}

TEST_P(CompressedSecondaryCacheTestWithCompressionParam,
       BasicTestFromStringWithSplit) {
  std::shared_ptr<SecondaryCache> sec_cache{nullptr};
  std::string sec_cache_uri;
  if (sec_cache_is_compressed_) {
    if (LZ4_Supported()) {
      sec_cache_uri =
          "compressed_secondary_cache://"
          "capacity=2048;num_shard_bits=0;compression_type=kLZ4Compression;"
          "compress_format_version=2;enable_custom_split_merge=true";
    } else {
      ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
      sec_cache_uri =
          "compressed_secondary_cache://"
          "capacity=2048;num_shard_bits=0;compression_type=kNoCompression;"
          "enable_custom_split_merge=true";
      sec_cache_is_compressed_ = false;
    }
    Status s = SecondaryCache::CreateFromString(ConfigOptions(), sec_cache_uri,
                                                &sec_cache);
    EXPECT_OK(s);
  } else {
    sec_cache_uri =
        "compressed_secondary_cache://"
        "capacity=2048;num_shard_bits=0;compression_type=kNoCompression;"
        "enable_custom_split_merge=true";
    Status s = SecondaryCache::CreateFromString(ConfigOptions(), sec_cache_uri,
                                                &sec_cache);
    EXPECT_OK(s);
  }
  BasicTestHelper(sec_cache, sec_cache_is_compressed_);
}

TEST_P(CompressedSecondaryCacheTestWithCompressionParam, FailsTest) {
  FailsTest(sec_cache_is_compressed_);
}

TEST_P(CompressedSecondaryCacheTestWithCompressionParam,
       BasicIntegrationFailTest) {
  BasicIntegrationFailTest(sec_cache_is_compressed_);
}

TEST_P(CompressedSecondaryCacheTestWithCompressionParam,
       IntegrationSaveFailTest) {
  IntegrationSaveFailTest(sec_cache_is_compressed_);
}

TEST_P(CompressedSecondaryCacheTestWithCompressionParam,
       IntegrationCreateFailTest) {
  IntegrationCreateFailTest(sec_cache_is_compressed_);
}

TEST_P(CompressedSecondaryCacheTestWithCompressionParam,
       IntegrationFullCapacityTest) {
  IntegrationFullCapacityTest(sec_cache_is_compressed_);
}

TEST_P(CompressedSecondaryCacheTestWithCompressionParam, EntryRoles) {
  CompressedSecondaryCacheOptions opts;
  opts.capacity = 2048;
  opts.num_shard_bits = 0;
  if (sec_cache_is_compressed_) {
    if (!LZ4_Supported()) {
      ROCKSDB_GTEST_SKIP("This test requires LZ4 support.");
      return;
    }
  } else {
    opts.compression_type = CompressionType::kNoCompression;
  }
  // Select a random subset of roles to include, for a fast test
  Random& r = *Random::GetTLSInstance();
  CacheEntryRoleSet do_not_compress;
  for (uint32_t i = 0; i < kNumCacheEntryRoles; ++i) {
    // A few included on average, but decent chance of zero
    if (r.OneIn(5)) {
      do_not_compress.Add(static_cast<CacheEntryRole>(i));
    }
  }
  opts.do_not_compress_roles = do_not_compress;
  std::shared_ptr<SecondaryCache> sec_cache = NewCompressedSecondaryCache(opts);
  Random rnd(301);
  std::string junk = test::CompressibleString(&rnd, 0.5, 1000);
  for (uint32_t i = 0; i < kNumCacheEntryRoles; ++i) {
    CacheEntryRole role = static_cast<CacheEntryRole>(i);
    // Uniquify `junk`
    junk[0] = static_cast<char>(i);
    TestItem item{junk.data(), junk.length()};
    Slice ith_key = Slice(junk.data(), 16);
    get_perf_context()->Reset();
    ASSERT_OK(sec_cache->Insert(ith_key, &item, GetHelper(role), false));
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_dummy_count, 1U);
    ASSERT_OK(sec_cache->Insert(ith_key, &item, GetHelper(role), false));
    ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_real_count, 1U);
    bool kept_in_sec_cache{true};
    std::unique_ptr<SecondaryCacheResultHandle> handle = sec_cache->Lookup(
        ith_key, GetHelper(role), this, true,
        /*advise_erase=*/true, /*stats=*/nullptr, kept_in_sec_cache);
    ASSERT_NE(handle, nullptr);
    // Lookup returns the right data
    std::unique_ptr<TestItem> val =
        std::unique_ptr<TestItem>(static_cast<TestItem*>(handle->Value()));
    ASSERT_NE(val, nullptr);
    ASSERT_EQ(memcmp(val->Buf(), item.Buf(), item.Size()), 0);
    bool compressed =
        sec_cache_is_compressed_ && !do_not_compress.Contains(role);
    if (compressed) {
      ASSERT_EQ(get_perf_context()->compressed_sec_cache_uncompressed_bytes,
                junk.length());
      ASSERT_LT(get_perf_context()->compressed_sec_cache_compressed_bytes,
                junk.length() * 3 / 4);
      ASSERT_GT(get_perf_context()->compressed_sec_cache_compressed_bytes,
                junk.length() / 4);
    } else {
      ASSERT_EQ(get_perf_context()->compressed_sec_cache_uncompressed_bytes, 0);
      ASSERT_EQ(get_perf_context()->compressed_sec_cache_compressed_bytes, 0);
    }
  }
}

INSTANTIATE_TEST_CASE_P(CompressedSecCacheTests,
                        CompressedSecondaryCacheTestWithCompressionParam,
                        testing::Combine(testing::Bool(),
                                         GetTestingCacheTypes()));

class CompressedSecCacheTestWithCompressAndSplitParam
    : public CompressedSecondaryCacheTestBase,
      public ::testing::WithParamInterface<
          std::tuple<bool, bool, std::string>> {
 public:
  CompressedSecCacheTestWithCompressAndSplitParam() {
    sec_cache_is_compressed_ = std::get<0>(GetParam());
    enable_custom_split_merge_ = std::get<1>(GetParam());
  }
  const std::string& Type() const override { return std::get<2>(GetParam()); }
  bool sec_cache_is_compressed_;
  bool enable_custom_split_merge_;
};

TEST_P(CompressedSecCacheTestWithCompressAndSplitParam, BasicIntegrationTest) {
  BasicIntegrationTest(sec_cache_is_compressed_, enable_custom_split_merge_);
}

INSTANTIATE_TEST_CASE_P(CompressedSecCacheTests,
                        CompressedSecCacheTestWithCompressAndSplitParam,
                        ::testing::Combine(testing::Bool(), testing::Bool(),
                                           GetTestingCacheTypes()));

TEST_P(CompressedSecondaryCacheTest, SplitValueIntoChunksTest) {
  SplitValueIntoChunksTest();
}

TEST_P(CompressedSecondaryCacheTest, MergeChunksIntoValueTest) {
  MergeChunksIntoValueTest();
}

TEST_P(CompressedSecondaryCacheTest, SplitValueAndMergeChunksTest) {
  SplitValueAndMergeChunksTest();
}

using secondary_cache_test_util::WithCacheType;

class CompressedSecCacheTestWithTiered
    : public testing::Test,
      public WithCacheType,
      public testing::WithParamInterface<
          std::tuple<PrimaryCacheType, TieredAdmissionPolicy>> {
 public:
  using secondary_cache_test_util::WithCacheType::TestItem;
  CompressedSecCacheTestWithTiered() {
    LRUCacheOptions lru_opts;
    HyperClockCacheOptions hcc_opts(
        /*_capacity=*/0,
        /*_estimated_entry_charge=*/256 << 10,
        /*_num_shard_bits=*/0);
    // eviction_effort_cap setting simply to avoid churn in existing test
    hcc_opts.eviction_effort_cap = 100;
    TieredCacheOptions opts;
    lru_opts.capacity = 0;
    lru_opts.num_shard_bits = 0;
    lru_opts.high_pri_pool_ratio = 0;
    opts.cache_type = std::get<0>(GetParam());
    if (opts.cache_type == PrimaryCacheType::kCacheTypeLRU) {
      opts.cache_opts = &lru_opts;
    } else {
      opts.cache_opts = &hcc_opts;
    }
    opts.adm_policy = std::get<1>(GetParam());
    opts.comp_cache_opts.capacity = 0;
    opts.comp_cache_opts.num_shard_bits = 0;
    opts.total_capacity = 100 << 20;
    opts.compressed_secondary_ratio = 0.3;
    cache_ = NewTieredCache(opts);
    cache_res_mgr_ =
        std::make_shared<CacheReservationManagerImpl<CacheEntryRole::kMisc>>(
            cache_);
  }

  const std::string& Type() const override {
    if (std::get<0>(GetParam()) == PrimaryCacheType::kCacheTypeLRU) {
      return lru_str;
    } else {
      return hcc_str;
    }
  }

 protected:
  CacheReservationManager* cache_res_mgr() { return cache_res_mgr_.get(); }

  std::shared_ptr<Cache> GetTieredCache() { return cache_; }

  Cache* GetCache() {
    return static_cast_with_check<CacheWithSecondaryAdapter, Cache>(
               cache_.get())
        ->TEST_GetCache();
  }

  SecondaryCache* GetSecondaryCache() {
    return static_cast_with_check<CacheWithSecondaryAdapter, Cache>(
               cache_.get())
        ->TEST_GetSecondaryCache();
  }

  size_t GetPercent(size_t val, unsigned int percent) {
    return static_cast<size_t>(val * percent / 100);
  }

 private:
  std::shared_ptr<Cache> cache_;
  std::shared_ptr<CacheReservationManager> cache_res_mgr_;
  static std::string lru_str;
  static std::string hcc_str;
};

std::string CompressedSecCacheTestWithTiered::lru_str(WithCacheType::kLRU);
std::string CompressedSecCacheTestWithTiered::hcc_str(
    WithCacheType::kFixedHyperClock);

bool CacheUsageWithinBounds(size_t val1, size_t val2, size_t error) {
  return ((val1 < (val2 + error)) && (val1 > (val2 - error)));
}
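// Illustration only: with val2 = 30 << 20 and error = 1% of that (about
// 307 KiB), a usage of (30 << 20) + (100 << 10) is within bounds, while a
// full 1 MiB deviation is not.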

TEST_P(CompressedSecCacheTestWithTiered, CacheReservationManager) {
  CompressedSecondaryCache* sec_cache =
      static_cast<CompressedSecondaryCache*>(GetSecondaryCache());
  // Use EXPECT_PRED3 instead of EXPECT_NEAR to avoid too many size_t-to-double
  // explicit casts
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (30 << 20),
               GetPercent(30 << 20, 1));
  EXPECT_EQ(sec_cache->TEST_GetUsage(), 0);
  ASSERT_OK(cache_res_mgr()->UpdateCacheReservation(10 << 20));
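  // With compressed_secondary_ratio = 0.3 of the 100 MB total, the 10 MB
  // reservation is expected to be charged roughly 70/30 across the tiers:
  // 30 MB + 10 MB * 0.7 = 37 MB primary usage and 10 MB * 0.3 = 3 MB
  // secondary usage, as checked below.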
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (37 << 20),
               GetPercent(37 << 20, 1));
  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (3 << 20),
               GetPercent(3 << 20, 1));
  ASSERT_OK(cache_res_mgr()->UpdateCacheReservation(0));
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (30 << 20),
               GetPercent(30 << 20, 1));
  EXPECT_EQ(sec_cache->TEST_GetUsage(), 0);
}

TEST_P(CompressedSecCacheTestWithTiered,
       CacheReservationManagerMultipleUpdate) {
  CompressedSecondaryCache* sec_cache =
      static_cast<CompressedSecondaryCache*>(GetSecondaryCache());
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (30 << 20),
               GetPercent(30 << 20, 1));
  EXPECT_EQ(sec_cache->TEST_GetUsage(), 0);
  int i;
  for (i = 0; i < 10; ++i) {
    ASSERT_OK(cache_res_mgr()->UpdateCacheReservation((1 + i) << 20));
  }
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (37 << 20),
               GetPercent(37 << 20, 1));
  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (3 << 20),
               GetPercent(3 << 20, 1));
  for (i = 10; i > 0; --i) {
    ASSERT_OK(cache_res_mgr()->UpdateCacheReservation(((i - 1) << 20)));
  }
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (30 << 20),
               GetPercent(30 << 20, 1));
  EXPECT_EQ(sec_cache->TEST_GetUsage(), 0);
}

TEST_P(CompressedSecCacheTestWithTiered, AdmissionPolicy) {
  if (!LZ4_Supported()) {
    ROCKSDB_GTEST_BYPASS("This test requires LZ4 support\n");
    return;
  }
  Cache* tiered_cache = GetTieredCache().get();
  Cache* cache = GetCache();
  std::vector<CacheKey> keys;
  std::vector<std::string> vals;
  // Make the item size slightly less than 10MB to ensure we can fit the
  // expected number of items in the cache
  int item_size = (10 << 20) - (1 << 18);
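  // (10 << 20) - (1 << 18) is 10 MiB minus 256 KiB, about 9.75 MiB per item,
  // so seven items come to roughly 68 MiB and just fit the ~70 MiB primary
  // tier implied by the 100 MiB total capacity and the 0.3
  // compressed_secondary_ratio configured in the fixture.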
  int i;
  Random rnd(301);
  for (i = 0; i < 14; ++i) {
    keys.emplace_back(CacheKey::CreateUniqueForCacheLifetime(cache));
    vals.emplace_back(rnd.RandomString(item_size));
  }
  for (i = 0; i < 7; ++i) {
    TestItem* item = new TestItem(vals[i].data(), vals[i].length());
    ASSERT_OK(tiered_cache->Insert(keys[i].AsSlice(), item, GetHelper(),
                                   vals[i].length()));
  }
  Cache::Handle* handle1;
  handle1 = tiered_cache->Lookup(keys[0].AsSlice(), GetHelper(),
                                 /*context*/ this, Cache::Priority::LOW);
  ASSERT_NE(handle1, nullptr);
  Cache::Handle* handle2;
  handle2 = tiered_cache->Lookup(keys[1].AsSlice(), GetHelper(),
                                 /*context*/ this, Cache::Priority::LOW);
  ASSERT_NE(handle2, nullptr);
  tiered_cache->Release(handle1);
  tiered_cache->Release(handle2);
  // Flush all previous entries out of the primary cache
  for (i = 7; i < 14; ++i) {
    TestItem* item = new TestItem(vals[i].data(), vals[i].length());
    ASSERT_OK(tiered_cache->Insert(keys[i].AsSlice(), item, GetHelper(),
                                   vals[i].length()));
  }
  // keys 0 and 1 should be found as they had the hit bit set
  handle1 = tiered_cache->Lookup(keys[0].AsSlice(), GetHelper(),
                                 /*context*/ this, Cache::Priority::LOW);
  ASSERT_NE(handle1, nullptr);
  handle2 = tiered_cache->Lookup(keys[1].AsSlice(), GetHelper(),
                                 /*context*/ this, Cache::Priority::LOW);
  ASSERT_NE(handle2, nullptr);
  tiered_cache->Release(handle1);
  tiered_cache->Release(handle2);
  handle1 = tiered_cache->Lookup(keys[2].AsSlice(), GetHelper(),
                                 /*context*/ this, Cache::Priority::LOW);
  ASSERT_EQ(handle1, nullptr);
  handle1 = tiered_cache->Lookup(keys[3].AsSlice(), GetHelper(),
                                 /*context*/ this, Cache::Priority::LOW);
  ASSERT_EQ(handle1, nullptr);
}
  1074. TEST_P(CompressedSecCacheTestWithTiered, DynamicUpdate) {
  1075. CompressedSecondaryCache* sec_cache =
  1076. static_cast<CompressedSecondaryCache*>(GetSecondaryCache());
  1077. std::shared_ptr<Cache> tiered_cache = GetTieredCache();
  1078. // Use EXPECT_PRED3 instead of EXPECT_NEAR to void too many size_t to
  1079. // double explicit casts
  1080. EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (30 << 20),
  1081. GetPercent(30 << 20, 1));
  1082. size_t sec_capacity;
  1083. ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  1084. ASSERT_EQ(sec_capacity, (30 << 20));
  ASSERT_OK(UpdateTieredCache(tiered_cache, 130 << 20));
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (39 << 20),
               GetPercent(39 << 20, 1));
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, (39 << 20));

  ASSERT_OK(UpdateTieredCache(tiered_cache, 70 << 20));
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (21 << 20),
               GetPercent(21 << 20, 1));
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, (21 << 20));

  ASSERT_OK(UpdateTieredCache(tiered_cache, 100 << 20));
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (30 << 20),
               GetPercent(30 << 20, 1));
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, (30 << 20));
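  // The capacity updates above scale the secondary capacity by the same
  // 0.3 ratio: 130MB -> 39MB, 70MB -> 21MB, 100MB -> 30MB.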
  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.4));
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (40 << 20),
               GetPercent(40 << 20, 1));
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, (40 << 20));

  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.2));
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (20 << 20),
               GetPercent(20 << 20, 1));
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, (20 << 20));

  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 1.0));
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (100 << 20),
               GetPercent(100 << 20, 1));
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, 100 << 20);
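  // Passing -1 for the capacity appears to leave the 100MB total
  // unchanged, so these updates vary only the ratio: 0.4 -> 40MB,
  // 0.2 -> 20MB, and 1.0 turns the entire cache over to the secondary
  // tier.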
  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.0));
  // Only check usage for LRU cache. HCC shows a 64KB usage for some reason
  if (std::get<0>(GetParam()) == PrimaryCacheType::kCacheTypeLRU) {
    ASSERT_EQ(GetCache()->GetUsage(), 0);
  }
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, 0);

  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.3));
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (30 << 20),
               GetPercent(30 << 20, 1));
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, (30 << 20));
}

TEST_P(CompressedSecCacheTestWithTiered, DynamicUpdateWithReservation) {
  CompressedSecondaryCache* sec_cache =
      static_cast<CompressedSecondaryCache*>(GetSecondaryCache());
  std::shared_ptr<Cache> tiered_cache = GetTieredCache();

  ASSERT_OK(cache_res_mgr()->UpdateCacheReservation(10 << 20));
  // Use EXPECT_PRED3 instead of EXPECT_NEAR to avoid too many size_t to
  // double explicit casts
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (37 << 20),
               GetPercent(37 << 20, 1));
  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (3 << 20),
               GetPercent(3 << 20, 1));
  size_t sec_capacity;
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, (30 << 20));
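  // The numbers above are consistent with the 10MB reservation being split
  // across tiers by the 0.3 ratio: ~3MB is charged to the secondary cache
  // (TEST_GetUsage) and the primary usage reads 30MB (secondary
  // reservation) + 10MB - 3MB = 37MB.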
  ASSERT_OK(UpdateTieredCache(tiered_cache, 70 << 20));
  // Only check usage for LRU cache. HCC is slightly off for some reason
  if (std::get<0>(GetParam()) == PrimaryCacheType::kCacheTypeLRU) {
    EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (28 << 20),
                 GetPercent(28 << 20, 1));
  }
  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (3 << 20),
               GetPercent(3 << 20, 1));
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, (21 << 20));

  ASSERT_OK(UpdateTieredCache(tiered_cache, 130 << 20));
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (46 << 20),
               GetPercent(46 << 20, 1));
  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (3 << 20),
               GetPercent(3 << 20, 1));
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, (39 << 20));

  ASSERT_OK(UpdateTieredCache(tiered_cache, 100 << 20));
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (37 << 20),
               GetPercent(37 << 20, 1));
  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (3 << 20),
               GetPercent(3 << 20, 1));
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, (30 << 20));
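  // Each capacity update above follows the same pattern: at total capacity
  // C, the secondary capacity is 0.3 * C and the primary usage is about
  // 0.3 * C + 7MB (the 10MB reservation minus the ~3MB charged to the
  // secondary): 70MB -> 28MB, 130MB -> 46MB, 100MB -> 37MB.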
  ASSERT_OK(tiered_cache->GetSecondaryCacheCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, 30 << 20);
  size_t sec_usage;
  ASSERT_OK(tiered_cache->GetSecondaryCachePinnedUsage(sec_usage));
  EXPECT_PRED3(CacheUsageWithinBounds, sec_usage, 3 << 20,
               GetPercent(3 << 20, 1));
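  // The public tiered-cache accessors should agree with the internal view;
  // the pinned usage reported here is presumably the reserved portion
  // (~3MB) rather than cached data, since nothing has been inserted.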
  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.39));
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (45 << 20),
               GetPercent(45 << 20, 1));
  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (4 << 20),
               GetPercent(4 << 20, 1));
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, (39 << 20));

  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.2));
  // Only check usage for LRU cache. HCC is slightly off for some reason
  if (std::get<0>(GetParam()) == PrimaryCacheType::kCacheTypeLRU) {
    EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (28 << 20),
                 GetPercent(28 << 20, 1));
  }
  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (2 << 20),
               GetPercent(2 << 20, 1));
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, (20 << 20));

  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 1.0));
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (100 << 20),
               GetPercent(100 << 20, 1));
  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (10 << 20),
               GetPercent(10 << 20, 1));
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, 100 << 20);

  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.0));
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (10 << 20),
               GetPercent(10 << 20, 1));
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, 0);
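  // Ratio updates shift the reservation's split along with the capacity:
  // at 0.39 the secondary absorbs ~3.9MB of the 10MB reservation (usage
  // ~45MB), at 0.2 only ~2MB, at 1.0 all 10MB, and at 0.0 the secondary is
  // effectively disabled so the full reservation lands in the primary
  // cache (usage ~10MB).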
  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.3));
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (37 << 20),
               GetPercent(37 << 20, 1));
  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (3 << 20),
               GetPercent(3 << 20, 1));
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, 30 << 20);

  ASSERT_OK(cache_res_mgr()->UpdateCacheReservation(0));
}

TEST_P(CompressedSecCacheTestWithTiered, ReservationOverCapacity) {
  CompressedSecondaryCache* sec_cache =
      static_cast<CompressedSecondaryCache*>(GetSecondaryCache());
  std::shared_ptr<Cache> tiered_cache = GetTieredCache();

  ASSERT_OK(cache_res_mgr()->UpdateCacheReservation(110 << 20));
  // Use EXPECT_PRED3 instead of EXPECT_NEAR to avoid too many size_t to
  // double explicit casts
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (110 << 20),
               GetPercent(110 << 20, 1));
  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (30 << 20),
               GetPercent(30 << 20, 1));
  size_t sec_capacity;
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, (30 << 20));
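  // The 110MB reservation exceeds the 100MB total capacity. The
  // secondary's proportional share (0.3 * 110MB = 33MB) appears to be
  // capped at its 30MB capacity, which is what TEST_GetUsage reports
  // above.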
  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.39));
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (110 << 20),
               GetPercent(110 << 20, 1));
  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (39 << 20),
               GetPercent(39 << 20, 1));
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, (39 << 20));

  ASSERT_OK(cache_res_mgr()->UpdateCacheReservation(90 << 20));
  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (94 << 20),
               GetPercent(94 << 20, 1));
  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (35 << 20),
               GetPercent(35 << 20, 1));
  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
  ASSERT_EQ(sec_capacity, (39 << 20));
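  // Raising the ratio to 0.39 lifts the cap to 39MB, so the secondary
  // absorbs that much of the reservation. Shrinking the reservation to
  // 90MB brings it back under capacity: the secondary's share is
  // 0.39 * 90MB ~= 35MB and the primary usage is about
  // 90MB + 39MB - 35MB = 94MB.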
  ASSERT_OK(cache_res_mgr()->UpdateCacheReservation(0));
}

INSTANTIATE_TEST_CASE_P(
    CompressedSecCacheTests, CompressedSecCacheTestWithTiered,
    ::testing::Values(
        std::make_tuple(PrimaryCacheType::kCacheTypeLRU,
                        TieredAdmissionPolicy::kAdmPolicyAllowCacheHits),
        std::make_tuple(PrimaryCacheType::kCacheTypeHCC,
                        TieredAdmissionPolicy::kAdmPolicyAllowCacheHits)));

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}