blob_source_test.cc

// Copyright (c) Meta Platforms, Inc. and affiliates.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "db/blob/blob_source.h"

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <memory>
#include <string>

#include "cache/charged_cache.h"
#include "cache/compressed_secondary_cache.h"
#include "db/blob/blob_contents.h"
#include "db/blob/blob_file_cache.h"
#include "db/blob/blob_file_reader.h"
#include "db/blob/blob_log_format.h"
#include "db/blob/blob_log_writer.h"
#include "db/db_test_util.h"
#include "file/filename.h"
#include "file/read_write_util.h"
#include "options/cf_options.h"
#include "rocksdb/options.h"
#include "util/compression.h"
#include "util/random.h"

namespace ROCKSDB_NAMESPACE {

namespace {

// Creates a test blob file with `num` blobs in it.
void WriteBlobFile(const ImmutableOptions& immutable_options,
                   uint32_t column_family_id, bool has_ttl,
                   const ExpirationRange& expiration_range_header,
                   const ExpirationRange& expiration_range_footer,
                   uint64_t blob_file_number, const std::vector<Slice>& keys,
                   const std::vector<Slice>& blobs, CompressionType compression,
                   std::vector<uint64_t>& blob_offsets,
                   std::vector<uint64_t>& blob_sizes) {
  assert(!immutable_options.cf_paths.empty());

  size_t num = keys.size();

  assert(num == blobs.size());
  assert(num == blob_offsets.size());
  assert(num == blob_sizes.size());

  const std::string blob_file_path =
      BlobFileName(immutable_options.cf_paths.front().path, blob_file_number);

  std::unique_ptr<FSWritableFile> file;
  ASSERT_OK(NewWritableFile(immutable_options.fs.get(), blob_file_path, &file,
                            FileOptions()));

  std::unique_ptr<WritableFileWriter> file_writer(new WritableFileWriter(
      std::move(file), blob_file_path, FileOptions(), immutable_options.clock));

  constexpr Statistics* statistics = nullptr;
  constexpr bool use_fsync = false;
  constexpr bool do_flush = false;

  BlobLogWriter blob_log_writer(std::move(file_writer), immutable_options.clock,
                                statistics, blob_file_number, use_fsync,
                                do_flush);

  BlobLogHeader header(column_family_id, compression, has_ttl,
                       expiration_range_header);

  ASSERT_OK(blob_log_writer.WriteHeader(WriteOptions(), header));

  std::vector<std::string> compressed_blobs(num);
  std::vector<Slice> blobs_to_write(num);
  if (kNoCompression == compression) {
    for (size_t i = 0; i < num; ++i) {
      blobs_to_write[i] = blobs[i];
      blob_sizes[i] = blobs[i].size();
    }
  } else {
    CompressionOptions opts;
    CompressionContext context(compression, opts);
    CompressionInfo info(opts, context, CompressionDict::GetEmptyDict(),
                         compression);
    constexpr uint32_t compression_format_version = 2;

    for (size_t i = 0; i < num; ++i) {
      ASSERT_TRUE(OLD_CompressData(blobs[i], info, compression_format_version,
                                   &compressed_blobs[i]));
      blobs_to_write[i] = compressed_blobs[i];
      blob_sizes[i] = compressed_blobs[i].size();
    }
  }

  for (size_t i = 0; i < num; ++i) {
    uint64_t key_offset = 0;

    ASSERT_OK(blob_log_writer.AddRecord(WriteOptions(), keys[i],
                                        blobs_to_write[i], &key_offset,
                                        &blob_offsets[i]));
  }

  BlobLogFooter footer;
  footer.blob_count = num;
  footer.expiration_range = expiration_range_footer;

  std::string checksum_method;
  std::string checksum_value;

  ASSERT_OK(blob_log_writer.AppendFooter(WriteOptions(), footer,
                                         &checksum_method, &checksum_value));
}
}  // anonymous namespace

class BlobSourceTest : public DBTestBase {
 protected:
 public:
  explicit BlobSourceTest()
      : DBTestBase("blob_source_test", /*env_do_fsync=*/true) {
    options_.env = env_;
    options_.enable_blob_files = true;
    options_.create_if_missing = true;

    LRUCacheOptions co;
    co.capacity = 8 << 20;
    co.num_shard_bits = 2;
    co.metadata_charge_policy = kDontChargeCacheMetadata;
    co.high_pri_pool_ratio = 0.2;
    co.low_pri_pool_ratio = 0.2;
    options_.blob_cache = NewLRUCache(co);
    options_.lowest_used_cache_tier = CacheTier::kVolatileTier;

    assert(db_->GetDbIdentity(db_id_).ok());
    assert(db_->GetDbSessionId(db_session_id_).ok());
  }

  Options options_;

  std::string db_id_;
  std::string db_session_id_;
};
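
// Reads blobs back through BlobSource::GetBlob() with fill_cache off and on,
// and checks the blob cache related perf context counters and statistics
// (BLOB_DB_CACHE_MISS/HIT/ADD/BYTES_READ/BYTES_WRITE) along the way.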
TEST_F(BlobSourceTest, GetBlobsFromCache) {
  options_.cf_paths.emplace_back(
      test::PerThreadDBPath(env_, "BlobSourceTest_GetBlobsFromCache"), 0);

  options_.statistics = CreateDBStatistics();
  Statistics* statistics = options_.statistics.get();
  assert(statistics);

  DestroyAndReopen(options_);

  ImmutableOptions immutable_options(options_);
  MutableCFOptions mutable_cf_options(options_);

  constexpr uint32_t column_family_id = 1;
  constexpr bool has_ttl = false;
  constexpr ExpirationRange expiration_range;
  constexpr uint64_t blob_file_number = 1;
  constexpr size_t num_blobs = 16;

  std::vector<std::string> key_strs;
  std::vector<std::string> blob_strs;

  for (size_t i = 0; i < num_blobs; ++i) {
    key_strs.push_back("key" + std::to_string(i));
    blob_strs.push_back("blob" + std::to_string(i));
  }

  std::vector<Slice> keys;
  std::vector<Slice> blobs;

  uint64_t file_size = BlobLogHeader::kSize;
  for (size_t i = 0; i < num_blobs; ++i) {
    keys.emplace_back(key_strs[i]);
    blobs.emplace_back(blob_strs[i]);
    file_size += BlobLogRecord::kHeaderSize + keys[i].size() + blobs[i].size();
  }
  file_size += BlobLogFooter::kSize;

  std::vector<uint64_t> blob_offsets(keys.size());
  std::vector<uint64_t> blob_sizes(keys.size());

  WriteBlobFile(immutable_options, column_family_id, has_ttl, expiration_range,
                expiration_range, blob_file_number, keys, blobs, kNoCompression,
                blob_offsets, blob_sizes);

  constexpr size_t capacity = 1024;
  std::shared_ptr<Cache> backing_cache =
      NewLRUCache(capacity);  // Blob file cache

  FileOptions file_options;
  constexpr HistogramImpl* blob_file_read_hist = nullptr;

  std::unique_ptr<BlobFileCache> blob_file_cache =
      std::make_unique<BlobFileCache>(
          backing_cache.get(), &immutable_options, &file_options,
          column_family_id, blob_file_read_hist, nullptr /*IOTracer*/);

  BlobSource blob_source(immutable_options, mutable_cf_options, db_id_,
                         db_session_id_, blob_file_cache.get());

  ReadOptions read_options;
  read_options.verify_checksums = true;

  constexpr FilePrefetchBuffer* prefetch_buffer = nullptr;

  {
    // GetBlob
    std::vector<PinnableSlice> values(keys.size());
    uint64_t bytes_read = 0;
    uint64_t blob_bytes = 0;
    uint64_t total_bytes = 0;

    read_options.fill_cache = false;
    get_perf_context()->Reset();

    for (size_t i = 0; i < num_blobs; ++i) {
      ASSERT_FALSE(blob_source.TEST_BlobInCache(blob_file_number, file_size,
                                                blob_offsets[i]));

      ASSERT_OK(blob_source.GetBlob(read_options, keys[i], blob_file_number,
                                    blob_offsets[i], file_size, blob_sizes[i],
                                    kNoCompression, prefetch_buffer, &values[i],
                                    &bytes_read));
      ASSERT_EQ(values[i], blobs[i]);
      ASSERT_TRUE(values[i].IsPinned());
      ASSERT_EQ(bytes_read,
                BlobLogRecord::kHeaderSize + keys[i].size() + blob_sizes[i]);

      ASSERT_FALSE(blob_source.TEST_BlobInCache(blob_file_number, file_size,
                                                blob_offsets[i]));

      total_bytes += bytes_read;
    }

    // Retrieved the blob cache num_blobs * 3 times via TEST_BlobInCache,
    // GetBlob, and TEST_BlobInCache.
    ASSERT_EQ((int)get_perf_context()->blob_cache_hit_count, 0);
    ASSERT_EQ((int)get_perf_context()->blob_read_count, num_blobs);
    ASSERT_EQ((int)get_perf_context()->blob_read_byte, total_bytes);
    ASSERT_GE((int)get_perf_context()->blob_checksum_time, 0);
    ASSERT_EQ((int)get_perf_context()->blob_decompress_time, 0);

    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_MISS), num_blobs * 3);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_HIT), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_ADD), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_READ), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_WRITE), 0);

    read_options.fill_cache = true;
    blob_bytes = 0;
    total_bytes = 0;
    get_perf_context()->Reset();
    statistics->Reset().PermitUncheckedError();

    for (size_t i = 0; i < num_blobs; ++i) {
      ASSERT_FALSE(blob_source.TEST_BlobInCache(blob_file_number, file_size,
                                                blob_offsets[i]));

      ASSERT_OK(blob_source.GetBlob(read_options, keys[i], blob_file_number,
                                    blob_offsets[i], file_size, blob_sizes[i],
                                    kNoCompression, prefetch_buffer, &values[i],
                                    &bytes_read));
      ASSERT_EQ(values[i], blobs[i]);
      ASSERT_TRUE(values[i].IsPinned());
      ASSERT_EQ(bytes_read,
                BlobLogRecord::kHeaderSize + keys[i].size() + blob_sizes[i]);

      blob_bytes += blob_sizes[i];
      total_bytes += bytes_read;

      ASSERT_EQ((int)get_perf_context()->blob_cache_hit_count, i);
      ASSERT_EQ((int)get_perf_context()->blob_read_count, i + 1);
      ASSERT_EQ((int)get_perf_context()->blob_read_byte, total_bytes);

      ASSERT_TRUE(blob_source.TEST_BlobInCache(blob_file_number, file_size,
                                               blob_offsets[i]));

      ASSERT_EQ((int)get_perf_context()->blob_cache_hit_count, i + 1);
      ASSERT_EQ((int)get_perf_context()->blob_read_count, i + 1);
      ASSERT_EQ((int)get_perf_context()->blob_read_byte, total_bytes);
    }

    ASSERT_EQ((int)get_perf_context()->blob_cache_hit_count, num_blobs);
    ASSERT_EQ((int)get_perf_context()->blob_read_count, num_blobs);
    ASSERT_EQ((int)get_perf_context()->blob_read_byte, total_bytes);

    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_MISS), num_blobs * 2);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_HIT), num_blobs);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_ADD), num_blobs);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_READ), blob_bytes);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_WRITE),
              blob_bytes);

    read_options.fill_cache = true;
    total_bytes = 0;
    blob_bytes = 0;
    get_perf_context()->Reset();
    statistics->Reset().PermitUncheckedError();

    for (size_t i = 0; i < num_blobs; ++i) {
      ASSERT_TRUE(blob_source.TEST_BlobInCache(blob_file_number, file_size,
                                               blob_offsets[i]));

      ASSERT_OK(blob_source.GetBlob(read_options, keys[i], blob_file_number,
                                    blob_offsets[i], file_size, blob_sizes[i],
                                    kNoCompression, prefetch_buffer, &values[i],
                                    &bytes_read));
      ASSERT_EQ(values[i], blobs[i]);
      ASSERT_TRUE(values[i].IsPinned());
      ASSERT_EQ(bytes_read,
                BlobLogRecord::kHeaderSize + keys[i].size() + blob_sizes[i]);

      ASSERT_TRUE(blob_source.TEST_BlobInCache(blob_file_number, file_size,
                                               blob_offsets[i]));

      total_bytes += bytes_read;    // on-disk blob record size
      blob_bytes += blob_sizes[i];  // cached blob value size
    }

    // Retrieved the blob cache num_blobs * 3 times via TEST_BlobInCache,
    // GetBlob, and TEST_BlobInCache.
    ASSERT_EQ((int)get_perf_context()->blob_cache_hit_count, num_blobs * 3);
    ASSERT_EQ((int)get_perf_context()->blob_read_count, 0);  // without i/o
    ASSERT_EQ((int)get_perf_context()->blob_read_byte, 0);   // without i/o

    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_MISS), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_HIT), num_blobs * 3);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_ADD), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_READ),
              blob_bytes * 3);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_WRITE), 0);

    // Cache-only GetBlob
    read_options.read_tier = ReadTier::kBlockCacheTier;
    total_bytes = 0;
    blob_bytes = 0;
    get_perf_context()->Reset();
    statistics->Reset().PermitUncheckedError();

    for (size_t i = 0; i < num_blobs; ++i) {
      ASSERT_TRUE(blob_source.TEST_BlobInCache(blob_file_number, file_size,
                                               blob_offsets[i]));

      ASSERT_OK(blob_source.GetBlob(read_options, keys[i], blob_file_number,
                                    blob_offsets[i], file_size, blob_sizes[i],
                                    kNoCompression, prefetch_buffer, &values[i],
                                    &bytes_read));
      ASSERT_EQ(values[i], blobs[i]);
      ASSERT_TRUE(values[i].IsPinned());
      ASSERT_EQ(bytes_read,
                BlobLogRecord::kHeaderSize + keys[i].size() + blob_sizes[i]);

      ASSERT_TRUE(blob_source.TEST_BlobInCache(blob_file_number, file_size,
                                               blob_offsets[i]));

      total_bytes += bytes_read;
      blob_bytes += blob_sizes[i];
    }

    // Retrieved the blob cache num_blobs * 3 times via TEST_BlobInCache,
    // GetBlob, and TEST_BlobInCache.
    ASSERT_EQ((int)get_perf_context()->blob_cache_hit_count, num_blobs * 3);
    ASSERT_EQ((int)get_perf_context()->blob_read_count, 0);  // without i/o
    ASSERT_EQ((int)get_perf_context()->blob_read_byte, 0);   // without i/o

    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_MISS), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_HIT), num_blobs * 3);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_ADD), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_READ),
              blob_bytes * 3);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_WRITE), 0);
  }

  options_.blob_cache->EraseUnRefEntries();

  {
    // Cache-only GetBlob
    std::vector<PinnableSlice> values(keys.size());
    uint64_t bytes_read = 0;

    read_options.read_tier = ReadTier::kBlockCacheTier;
    read_options.fill_cache = true;
    get_perf_context()->Reset();
    statistics->Reset().PermitUncheckedError();

    for (size_t i = 0; i < num_blobs; ++i) {
      ASSERT_FALSE(blob_source.TEST_BlobInCache(blob_file_number, file_size,
                                                blob_offsets[i]));

      ASSERT_TRUE(blob_source
                      .GetBlob(read_options, keys[i], blob_file_number,
                               blob_offsets[i], file_size, blob_sizes[i],
                               kNoCompression, prefetch_buffer, &values[i],
                               &bytes_read)
                      .IsIncomplete());
      ASSERT_TRUE(values[i].empty());
      ASSERT_FALSE(values[i].IsPinned());
      ASSERT_EQ(bytes_read, 0);

      ASSERT_FALSE(blob_source.TEST_BlobInCache(blob_file_number, file_size,
                                                blob_offsets[i]));
    }

    // Retrieved the blob cache num_blobs * 3 times via TEST_BlobInCache,
    // GetBlob, and TEST_BlobInCache.
    ASSERT_EQ((int)get_perf_context()->blob_cache_hit_count, 0);
    ASSERT_EQ((int)get_perf_context()->blob_read_count, 0);
    ASSERT_EQ((int)get_perf_context()->blob_read_byte, 0);

    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_MISS), num_blobs * 3);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_HIT), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_ADD), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_READ), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_WRITE), 0);
  }

  {
    // GetBlob from non-existing file
    std::vector<PinnableSlice> values(keys.size());
    uint64_t bytes_read = 0;
    uint64_t file_number = 100;  // non-existing file

    read_options.read_tier = ReadTier::kReadAllTier;
    read_options.fill_cache = true;
    get_perf_context()->Reset();
    statistics->Reset().PermitUncheckedError();

    for (size_t i = 0; i < num_blobs; ++i) {
      ASSERT_FALSE(blob_source.TEST_BlobInCache(file_number, file_size,
                                                blob_offsets[i]));

      ASSERT_TRUE(blob_source
                      .GetBlob(read_options, keys[i], file_number,
                               blob_offsets[i], file_size, blob_sizes[i],
                               kNoCompression, prefetch_buffer, &values[i],
                               &bytes_read)
                      .IsIOError());
      ASSERT_TRUE(values[i].empty());
      ASSERT_FALSE(values[i].IsPinned());
      ASSERT_EQ(bytes_read, 0);

      ASSERT_FALSE(blob_source.TEST_BlobInCache(file_number, file_size,
                                                blob_offsets[i]));
    }

    // Retrieved the blob cache num_blobs * 3 times via TEST_BlobInCache,
    // GetBlob, and TEST_BlobInCache.
    ASSERT_EQ((int)get_perf_context()->blob_cache_hit_count, 0);
    ASSERT_EQ((int)get_perf_context()->blob_read_count, 0);
    ASSERT_EQ((int)get_perf_context()->blob_read_byte, 0);

    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_MISS), num_blobs * 3);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_HIT), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_ADD), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_READ), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_WRITE), 0);
  }
}
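
// Reads Snappy-compressed blobs and verifies that GetBlob() returns the
// uncompressed values; once the blobs are cached, subsequent cache-only reads
// should not spend any time decompressing.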
TEST_F(BlobSourceTest, GetCompressedBlobs) {
  if (!Snappy_Supported()) {
    return;
  }

  const CompressionType compression = kSnappyCompression;

  options_.cf_paths.emplace_back(
      test::PerThreadDBPath(env_, "BlobSourceTest_GetCompressedBlobs"), 0);

  DestroyAndReopen(options_);

  ImmutableOptions immutable_options(options_);
  MutableCFOptions mutable_cf_options(options_);

  constexpr uint32_t column_family_id = 1;
  constexpr bool has_ttl = false;
  constexpr ExpirationRange expiration_range;
  constexpr size_t num_blobs = 256;

  std::vector<std::string> key_strs;
  std::vector<std::string> blob_strs;

  for (size_t i = 0; i < num_blobs; ++i) {
    key_strs.push_back("key" + std::to_string(i));
    blob_strs.push_back("blob" + std::to_string(i));
  }

  std::vector<Slice> keys;
  std::vector<Slice> blobs;

  for (size_t i = 0; i < num_blobs; ++i) {
    keys.emplace_back(key_strs[i]);
    blobs.emplace_back(blob_strs[i]);
  }

  std::vector<uint64_t> blob_offsets(keys.size());
  std::vector<uint64_t> blob_sizes(keys.size());

  constexpr size_t capacity = 1024;
  auto backing_cache = NewLRUCache(capacity);  // Blob file cache

  FileOptions file_options;
  std::unique_ptr<BlobFileCache> blob_file_cache =
      std::make_unique<BlobFileCache>(
          backing_cache.get(), &immutable_options, &file_options,
          column_family_id, nullptr /*HistogramImpl*/, nullptr /*IOTracer*/);

  BlobSource blob_source(immutable_options, mutable_cf_options, db_id_,
                         db_session_id_, blob_file_cache.get());

  ReadOptions read_options;
  read_options.verify_checksums = true;

  uint64_t bytes_read = 0;
  std::vector<PinnableSlice> values(keys.size());

  {
    // Snappy Compression
    const uint64_t file_number = 1;

    read_options.read_tier = ReadTier::kReadAllTier;

    WriteBlobFile(immutable_options, column_family_id, has_ttl,
                  expiration_range, expiration_range, file_number, keys, blobs,
                  compression, blob_offsets, blob_sizes);

    CacheHandleGuard<BlobFileReader> blob_file_reader;
    ASSERT_OK(blob_source.GetBlobFileReader(read_options, file_number,
                                            &blob_file_reader));
    ASSERT_NE(blob_file_reader.GetValue(), nullptr);

    const uint64_t file_size = blob_file_reader.GetValue()->GetFileSize();
    ASSERT_EQ(blob_file_reader.GetValue()->GetCompressionType(), compression);

    for (size_t i = 0; i < num_blobs; ++i) {
      ASSERT_NE(blobs[i].size() /*uncompressed size*/,
                blob_sizes[i] /*compressed size*/);
    }

    read_options.fill_cache = true;
    read_options.read_tier = ReadTier::kReadAllTier;
    get_perf_context()->Reset();

    for (size_t i = 0; i < num_blobs; ++i) {
      ASSERT_FALSE(blob_source.TEST_BlobInCache(file_number, file_size,
                                                blob_offsets[i]));
      ASSERT_OK(blob_source.GetBlob(read_options, keys[i], file_number,
                                    blob_offsets[i], file_size, blob_sizes[i],
                                    compression, nullptr /*prefetch_buffer*/,
                                    &values[i], &bytes_read));
      ASSERT_EQ(values[i], blobs[i] /*uncompressed blob*/);
      ASSERT_NE(values[i].size(), blob_sizes[i] /*compressed size*/);
      ASSERT_EQ(bytes_read,
                BlobLogRecord::kHeaderSize + keys[i].size() + blob_sizes[i]);

      ASSERT_TRUE(blob_source.TEST_BlobInCache(file_number, file_size,
                                               blob_offsets[i]));
    }

    ASSERT_GE((int)get_perf_context()->blob_decompress_time, 0);

    read_options.read_tier = ReadTier::kBlockCacheTier;
    get_perf_context()->Reset();

    for (size_t i = 0; i < num_blobs; ++i) {
      ASSERT_TRUE(blob_source.TEST_BlobInCache(file_number, file_size,
                                               blob_offsets[i]));

      // Compressed blob size is passed in GetBlob
      ASSERT_OK(blob_source.GetBlob(read_options, keys[i], file_number,
                                    blob_offsets[i], file_size, blob_sizes[i],
                                    compression, nullptr /*prefetch_buffer*/,
                                    &values[i], &bytes_read));
      ASSERT_EQ(values[i], blobs[i] /*uncompressed blob*/);
      ASSERT_NE(values[i].size(), blob_sizes[i] /*compressed size*/);
      ASSERT_EQ(bytes_read,
                BlobLogRecord::kHeaderSize + keys[i].size() + blob_sizes[i]);

      ASSERT_TRUE(blob_source.TEST_BlobInCache(file_number, file_size,
                                               blob_offsets[i]));
    }

    ASSERT_EQ((int)get_perf_context()->blob_decompress_time, 0);
  }
}
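
// Exercises BlobSource::MultiGetBlob() across multiple blob files, including a
// batch that also references a non-existent file, and checks the cache
// counters for both the real and the fake requests.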
TEST_F(BlobSourceTest, MultiGetBlobsFromMultiFiles) {
  options_.cf_paths.emplace_back(
      test::PerThreadDBPath(env_, "BlobSourceTest_MultiGetBlobsFromMultiFiles"),
      0);

  options_.statistics = CreateDBStatistics();
  Statistics* statistics = options_.statistics.get();
  assert(statistics);

  DestroyAndReopen(options_);

  ImmutableOptions immutable_options(options_);
  MutableCFOptions mutable_cf_options(options_);

  constexpr uint32_t column_family_id = 1;
  constexpr bool has_ttl = false;
  constexpr ExpirationRange expiration_range;
  constexpr uint64_t blob_files = 2;
  constexpr size_t num_blobs = 32;

  std::vector<std::string> key_strs;
  std::vector<std::string> blob_strs;

  for (size_t i = 0; i < num_blobs; ++i) {
    key_strs.push_back("key" + std::to_string(i));
    blob_strs.push_back("blob" + std::to_string(i));
  }

  std::vector<Slice> keys;
  std::vector<Slice> blobs;

  uint64_t file_size = BlobLogHeader::kSize;
  uint64_t blob_value_bytes = 0;
  for (size_t i = 0; i < num_blobs; ++i) {
    keys.emplace_back(key_strs[i]);
    blobs.emplace_back(blob_strs[i]);
    blob_value_bytes += blobs[i].size();
    file_size += BlobLogRecord::kHeaderSize + keys[i].size() + blobs[i].size();
  }
  file_size += BlobLogFooter::kSize;
  const uint64_t blob_records_bytes =
      file_size - BlobLogHeader::kSize - BlobLogFooter::kSize;

  std::vector<uint64_t> blob_offsets(keys.size());
  std::vector<uint64_t> blob_sizes(keys.size());

  {
    // Write key/blob pairs to multiple blob files.
    for (size_t i = 0; i < blob_files; ++i) {
      const uint64_t file_number = i + 1;
      WriteBlobFile(immutable_options, column_family_id, has_ttl,
                    expiration_range, expiration_range, file_number, keys,
                    blobs, kNoCompression, blob_offsets, blob_sizes);
    }
  }

  constexpr size_t capacity = 10;
  std::shared_ptr<Cache> backing_cache =
      NewLRUCache(capacity);  // Blob file cache

  FileOptions file_options;
  constexpr HistogramImpl* blob_file_read_hist = nullptr;

  std::unique_ptr<BlobFileCache> blob_file_cache =
      std::make_unique<BlobFileCache>(
          backing_cache.get(), &immutable_options, &file_options,
          column_family_id, blob_file_read_hist, nullptr /*IOTracer*/);

  BlobSource blob_source(immutable_options, mutable_cf_options, db_id_,
                         db_session_id_, blob_file_cache.get());

  ReadOptions read_options;
  read_options.verify_checksums = true;

  uint64_t bytes_read = 0;

  {
    // MultiGetBlob
    read_options.fill_cache = true;
    read_options.read_tier = ReadTier::kReadAllTier;

    autovector<BlobFileReadRequests> blob_reqs;
    std::array<autovector<BlobReadRequest>, blob_files> blob_reqs_in_file;
    std::array<PinnableSlice, num_blobs * blob_files> value_buf;
    std::array<Status, num_blobs * blob_files> statuses_buf;

    for (size_t i = 0; i < blob_files; ++i) {
      const uint64_t file_number = i + 1;
      for (size_t j = 0; j < num_blobs; ++j) {
        blob_reqs_in_file[i].emplace_back(
            keys[j], blob_offsets[j], blob_sizes[j], kNoCompression,
            &value_buf[i * num_blobs + j], &statuses_buf[i * num_blobs + j]);
      }
      blob_reqs.emplace_back(file_number, file_size, blob_reqs_in_file[i]);
    }

    get_perf_context()->Reset();
    statistics->Reset().PermitUncheckedError();

    blob_source.MultiGetBlob(read_options, blob_reqs, &bytes_read);

    for (size_t i = 0; i < blob_files; ++i) {
      const uint64_t file_number = i + 1;
      for (size_t j = 0; j < num_blobs; ++j) {
        ASSERT_OK(statuses_buf[i * num_blobs + j]);
        ASSERT_EQ(value_buf[i * num_blobs + j], blobs[j]);
        ASSERT_TRUE(blob_source.TEST_BlobInCache(file_number, file_size,
                                                 blob_offsets[j]));
      }
    }

    // Retrieved all blobs from 2 blob files twice via MultiGetBlob and
    // TEST_BlobInCache.
    ASSERT_EQ((int)get_perf_context()->blob_cache_hit_count,
              num_blobs * blob_files);
    ASSERT_EQ((int)get_perf_context()->blob_read_count,
              num_blobs * blob_files);  // blocking i/o
    ASSERT_EQ((int)get_perf_context()->blob_read_byte,
              blob_records_bytes * blob_files);  // blocking i/o
    ASSERT_GE((int)get_perf_context()->blob_checksum_time, 0);
    ASSERT_EQ((int)get_perf_context()->blob_decompress_time, 0);

    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_MISS),
              num_blobs * blob_files);  // MultiGetBlob
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_HIT),
              num_blobs * blob_files);  // TEST_BlobInCache
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_ADD),
              num_blobs * blob_files);  // MultiGetBlob
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_READ),
              blob_value_bytes * blob_files);  // TEST_BlobInCache
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_WRITE),
              blob_value_bytes * blob_files);  // MultiGetBlob

    get_perf_context()->Reset();
    statistics->Reset().PermitUncheckedError();

    autovector<BlobReadRequest> fake_blob_reqs_in_file;
    std::array<PinnableSlice, num_blobs> fake_value_buf;
    std::array<Status, num_blobs> fake_statuses_buf;

    const uint64_t fake_file_number = 100;
    for (size_t i = 0; i < num_blobs; ++i) {
      fake_blob_reqs_in_file.emplace_back(
          keys[i], blob_offsets[i], blob_sizes[i], kNoCompression,
          &fake_value_buf[i], &fake_statuses_buf[i]);
    }

    // Add a fake multi-get blob request.
    blob_reqs.emplace_back(fake_file_number, file_size, fake_blob_reqs_in_file);

    blob_source.MultiGetBlob(read_options, blob_reqs, &bytes_read);

    // Check the real blob read requests.
    for (size_t i = 0; i < blob_files; ++i) {
      const uint64_t file_number = i + 1;
      for (size_t j = 0; j < num_blobs; ++j) {
        ASSERT_OK(statuses_buf[i * num_blobs + j]);
        ASSERT_EQ(value_buf[i * num_blobs + j], blobs[j]);
        ASSERT_TRUE(blob_source.TEST_BlobInCache(file_number, file_size,
                                                 blob_offsets[j]));
      }
    }

    // Check the fake blob request.
    for (size_t i = 0; i < num_blobs; ++i) {
      ASSERT_TRUE(fake_statuses_buf[i].IsIOError());
      ASSERT_TRUE(fake_value_buf[i].empty());
      ASSERT_FALSE(blob_source.TEST_BlobInCache(fake_file_number, file_size,
                                                blob_offsets[i]));
    }

    // Retrieved all blobs from 3 blob files (including the fake one) twice
    // via MultiGetBlob and TEST_BlobInCache.
    ASSERT_EQ((int)get_perf_context()->blob_cache_hit_count,
              num_blobs * blob_files * 2);
    ASSERT_EQ((int)get_perf_context()->blob_read_count,
              0);  // blocking i/o
    ASSERT_EQ((int)get_perf_context()->blob_read_byte,
              0);  // blocking i/o
    ASSERT_GE((int)get_perf_context()->blob_checksum_time, 0);
    ASSERT_EQ((int)get_perf_context()->blob_decompress_time, 0);

    // Fake blob requests: MultiGetBlob and TEST_BlobInCache
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_MISS), num_blobs * 2);
    // Real blob requests: MultiGetBlob and TEST_BlobInCache
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_HIT),
              num_blobs * blob_files * 2);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_ADD), 0);
    // Real blob requests: MultiGetBlob and TEST_BlobInCache
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_READ),
              blob_value_bytes * blob_files * 2);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_WRITE), 0);
  }
}
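
// Exercises BlobSource::MultiGetBlobFromOneFile(): first reads half of the
// blobs to warm the cache, then performs cache-only batched reads, and finally
// covers the error paths (cold cache, non-existent file).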
TEST_F(BlobSourceTest, MultiGetBlobsFromCache) {
  options_.cf_paths.emplace_back(
      test::PerThreadDBPath(env_, "BlobSourceTest_MultiGetBlobsFromCache"), 0);

  options_.statistics = CreateDBStatistics();
  Statistics* statistics = options_.statistics.get();
  assert(statistics);

  DestroyAndReopen(options_);

  ImmutableOptions immutable_options(options_);
  MutableCFOptions mutable_cf_options(options_);

  constexpr uint32_t column_family_id = 1;
  constexpr bool has_ttl = false;
  constexpr ExpirationRange expiration_range;
  constexpr uint64_t blob_file_number = 1;
  constexpr size_t num_blobs = 16;

  std::vector<std::string> key_strs;
  std::vector<std::string> blob_strs;

  for (size_t i = 0; i < num_blobs; ++i) {
    key_strs.push_back("key" + std::to_string(i));
    blob_strs.push_back("blob" + std::to_string(i));
  }

  std::vector<Slice> keys;
  std::vector<Slice> blobs;

  uint64_t file_size = BlobLogHeader::kSize;
  for (size_t i = 0; i < num_blobs; ++i) {
    keys.emplace_back(key_strs[i]);
    blobs.emplace_back(blob_strs[i]);
    file_size += BlobLogRecord::kHeaderSize + keys[i].size() + blobs[i].size();
  }
  file_size += BlobLogFooter::kSize;

  std::vector<uint64_t> blob_offsets(keys.size());
  std::vector<uint64_t> blob_sizes(keys.size());

  WriteBlobFile(immutable_options, column_family_id, has_ttl, expiration_range,
                expiration_range, blob_file_number, keys, blobs, kNoCompression,
                blob_offsets, blob_sizes);

  constexpr size_t capacity = 10;
  std::shared_ptr<Cache> backing_cache =
      NewLRUCache(capacity);  // Blob file cache

  FileOptions file_options;
  constexpr HistogramImpl* blob_file_read_hist = nullptr;

  std::unique_ptr<BlobFileCache> blob_file_cache =
      std::make_unique<BlobFileCache>(
          backing_cache.get(), &immutable_options, &file_options,
          column_family_id, blob_file_read_hist, nullptr /*IOTracer*/);

  BlobSource blob_source(immutable_options, mutable_cf_options, db_id_,
                         db_session_id_, blob_file_cache.get());

  ReadOptions read_options;
  read_options.verify_checksums = true;

  constexpr FilePrefetchBuffer* prefetch_buffer = nullptr;

  {
    // MultiGetBlobFromOneFile
    uint64_t bytes_read = 0;
    std::array<Status, num_blobs> statuses_buf;
    std::array<PinnableSlice, num_blobs> value_buf;
    autovector<BlobReadRequest> blob_reqs;

    for (size_t i = 0; i < num_blobs; i += 2) {  // even index
      blob_reqs.emplace_back(keys[i], blob_offsets[i], blob_sizes[i],
                             kNoCompression, &value_buf[i], &statuses_buf[i]);
      ASSERT_FALSE(blob_source.TEST_BlobInCache(blob_file_number, file_size,
                                                blob_offsets[i]));
    }

    read_options.fill_cache = true;
    read_options.read_tier = ReadTier::kReadAllTier;
    get_perf_context()->Reset();
    statistics->Reset().PermitUncheckedError();

    // Get half of blobs
    blob_source.MultiGetBlobFromOneFile(read_options, blob_file_number,
                                        file_size, blob_reqs, &bytes_read);

    uint64_t fs_read_bytes = 0;
    uint64_t ca_read_bytes = 0;
    for (size_t i = 0; i < num_blobs; ++i) {
      if (i % 2 == 0) {
        ASSERT_OK(statuses_buf[i]);
        ASSERT_EQ(value_buf[i], blobs[i]);
        ASSERT_TRUE(value_buf[i].IsPinned());
        fs_read_bytes +=
            blob_sizes[i] + keys[i].size() + BlobLogRecord::kHeaderSize;
        ASSERT_TRUE(blob_source.TEST_BlobInCache(blob_file_number, file_size,
                                                 blob_offsets[i]));
        ca_read_bytes += blob_sizes[i];
      } else {
        statuses_buf[i].PermitUncheckedError();
        ASSERT_TRUE(value_buf[i].empty());
        ASSERT_FALSE(value_buf[i].IsPinned());
        ASSERT_FALSE(blob_source.TEST_BlobInCache(blob_file_number, file_size,
                                                  blob_offsets[i]));
      }
    }

    constexpr int num_even_blobs = num_blobs / 2;
    ASSERT_EQ((int)get_perf_context()->blob_cache_hit_count, num_even_blobs);
    ASSERT_EQ((int)get_perf_context()->blob_read_count,
              num_even_blobs);  // blocking i/o
    ASSERT_EQ((int)get_perf_context()->blob_read_byte,
              fs_read_bytes);  // blocking i/o
    ASSERT_GE((int)get_perf_context()->blob_checksum_time, 0);
    ASSERT_EQ((int)get_perf_context()->blob_decompress_time, 0);

    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_MISS), num_blobs);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_HIT), num_even_blobs);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_ADD), num_even_blobs);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_READ),
              ca_read_bytes);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_WRITE),
              ca_read_bytes);

    // Get the rest of blobs
    for (size_t i = 1; i < num_blobs; i += 2) {  // odd index
      ASSERT_FALSE(blob_source.TEST_BlobInCache(blob_file_number, file_size,
                                                blob_offsets[i]));

      ASSERT_OK(blob_source.GetBlob(read_options, keys[i], blob_file_number,
                                    blob_offsets[i], file_size, blob_sizes[i],
                                    kNoCompression, prefetch_buffer,
                                    &value_buf[i], &bytes_read));
      ASSERT_EQ(value_buf[i], blobs[i]);
      ASSERT_TRUE(value_buf[i].IsPinned());
      ASSERT_EQ(bytes_read,
                BlobLogRecord::kHeaderSize + keys[i].size() + blob_sizes[i]);

      ASSERT_TRUE(blob_source.TEST_BlobInCache(blob_file_number, file_size,
                                               blob_offsets[i]));
    }

    // Cache-only MultiGetBlobFromOneFile
    read_options.read_tier = ReadTier::kBlockCacheTier;
    get_perf_context()->Reset();
    statistics->Reset().PermitUncheckedError();

    blob_reqs.clear();
    for (size_t i = 0; i < num_blobs; ++i) {
      blob_reqs.emplace_back(keys[i], blob_offsets[i], blob_sizes[i],
                             kNoCompression, &value_buf[i], &statuses_buf[i]);
    }

    blob_source.MultiGetBlobFromOneFile(read_options, blob_file_number,
                                        file_size, blob_reqs, &bytes_read);

    uint64_t blob_bytes = 0;
    for (size_t i = 0; i < num_blobs; ++i) {
      ASSERT_OK(statuses_buf[i]);
      ASSERT_EQ(value_buf[i], blobs[i]);
      ASSERT_TRUE(value_buf[i].IsPinned());
      ASSERT_TRUE(blob_source.TEST_BlobInCache(blob_file_number, file_size,
                                               blob_offsets[i]));
      blob_bytes += blob_sizes[i];
    }

    // Retrieved the blob cache num_blobs * 2 times via GetBlob and
    // TEST_BlobInCache.
    ASSERT_EQ((int)get_perf_context()->blob_cache_hit_count, num_blobs * 2);
    ASSERT_EQ((int)get_perf_context()->blob_read_count, 0);  // blocking i/o
    ASSERT_EQ((int)get_perf_context()->blob_read_byte, 0);   // blocking i/o
    ASSERT_GE((int)get_perf_context()->blob_checksum_time, 0);
    ASSERT_EQ((int)get_perf_context()->blob_decompress_time, 0);

    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_MISS), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_HIT), num_blobs * 2);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_ADD), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_READ),
              blob_bytes * 2);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_WRITE), 0);
  }

  options_.blob_cache->EraseUnRefEntries();

  {
    // Cache-only MultiGetBlobFromOneFile
    uint64_t bytes_read = 0;
    read_options.read_tier = ReadTier::kBlockCacheTier;

    std::array<Status, num_blobs> statuses_buf;
    std::array<PinnableSlice, num_blobs> value_buf;
    autovector<BlobReadRequest> blob_reqs;

    for (size_t i = 0; i < num_blobs; i++) {
      blob_reqs.emplace_back(keys[i], blob_offsets[i], blob_sizes[i],
                             kNoCompression, &value_buf[i], &statuses_buf[i]);
      ASSERT_FALSE(blob_source.TEST_BlobInCache(blob_file_number, file_size,
                                                blob_offsets[i]));
    }

    get_perf_context()->Reset();
    statistics->Reset().PermitUncheckedError();

    blob_source.MultiGetBlobFromOneFile(read_options, blob_file_number,
                                        file_size, blob_reqs, &bytes_read);

    for (size_t i = 0; i < num_blobs; ++i) {
      ASSERT_TRUE(statuses_buf[i].IsIncomplete());
      ASSERT_TRUE(value_buf[i].empty());
      ASSERT_FALSE(value_buf[i].IsPinned());
      ASSERT_FALSE(blob_source.TEST_BlobInCache(blob_file_number, file_size,
                                                blob_offsets[i]));
    }

    ASSERT_EQ((int)get_perf_context()->blob_cache_hit_count, 0);
    ASSERT_EQ((int)get_perf_context()->blob_read_count, 0);  // blocking i/o
    ASSERT_EQ((int)get_perf_context()->blob_read_byte, 0);   // blocking i/o
    ASSERT_EQ((int)get_perf_context()->blob_checksum_time, 0);
    ASSERT_EQ((int)get_perf_context()->blob_decompress_time, 0);

    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_MISS), num_blobs * 2);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_HIT), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_ADD), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_READ), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_WRITE), 0);
  }

  {
    // MultiGetBlobFromOneFile from non-existing file
    uint64_t bytes_read = 0;
    uint64_t non_existing_file_number = 100;
    read_options.read_tier = ReadTier::kReadAllTier;

    std::array<Status, num_blobs> statuses_buf;
    std::array<PinnableSlice, num_blobs> value_buf;
    autovector<BlobReadRequest> blob_reqs;

    for (size_t i = 0; i < num_blobs; i++) {
      blob_reqs.emplace_back(keys[i], blob_offsets[i], blob_sizes[i],
                             kNoCompression, &value_buf[i], &statuses_buf[i]);
      ASSERT_FALSE(blob_source.TEST_BlobInCache(non_existing_file_number,
                                                file_size, blob_offsets[i]));
    }

    get_perf_context()->Reset();
    statistics->Reset().PermitUncheckedError();

    blob_source.MultiGetBlobFromOneFile(read_options, non_existing_file_number,
                                        file_size, blob_reqs, &bytes_read);

    for (size_t i = 0; i < num_blobs; ++i) {
      ASSERT_TRUE(statuses_buf[i].IsIOError());
      ASSERT_TRUE(value_buf[i].empty());
      ASSERT_FALSE(value_buf[i].IsPinned());
      ASSERT_FALSE(blob_source.TEST_BlobInCache(non_existing_file_number,
                                                file_size, blob_offsets[i]));
    }

    ASSERT_EQ((int)get_perf_context()->blob_cache_hit_count, 0);
    ASSERT_EQ((int)get_perf_context()->blob_read_count, 0);  // blocking i/o
    ASSERT_EQ((int)get_perf_context()->blob_read_byte, 0);   // blocking i/o
    ASSERT_EQ((int)get_perf_context()->blob_checksum_time, 0);
    ASSERT_EQ((int)get_perf_context()->blob_decompress_time, 0);

    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_MISS), num_blobs * 2);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_HIT), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_ADD), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_READ), 0);
    ASSERT_EQ(statistics->getTickerCount(BLOB_DB_CACHE_BYTES_WRITE), 0);
  }
}
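
// Test fixture with a tiny primary blob cache backed by a compressed
// secondary cache, so that evictions demote blobs to the secondary tier.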
class BlobSecondaryCacheTest : public DBTestBase {
 protected:
 public:
  explicit BlobSecondaryCacheTest()
      : DBTestBase("blob_secondary_cache_test", /*env_do_fsync=*/true) {
    options_.env = env_;
    options_.enable_blob_files = true;
    options_.create_if_missing = true;

    // Set a small cache capacity to evict entries from the cache, and to test
    // that secondary cache is used properly.
    lru_cache_opts_.capacity = 1024;
    lru_cache_opts_.num_shard_bits = 0;
    lru_cache_opts_.strict_capacity_limit = true;
    lru_cache_opts_.metadata_charge_policy = kDontChargeCacheMetadata;
    lru_cache_opts_.high_pri_pool_ratio = 0.2;
    lru_cache_opts_.low_pri_pool_ratio = 0.2;

    secondary_cache_opts_.capacity = 8 << 20;  // 8 MB
    secondary_cache_opts_.num_shard_bits = 0;
    secondary_cache_opts_.metadata_charge_policy =
        kDefaultCacheMetadataChargePolicy;

    // Read blobs from the secondary cache if they are not in the primary cache
    options_.lowest_used_cache_tier = CacheTier::kNonVolatileBlockTier;

    assert(db_->GetDbIdentity(db_id_).ok());
    assert(db_->GetDbSessionId(db_session_id_).ok());
  }

  Options options_;

  LRUCacheOptions lru_cache_opts_;
  CompressedSecondaryCacheOptions secondary_cache_opts_;

  std::string db_id_;
  std::string db_session_id_;
};
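
// Alternates reads of two blobs through a one-entry primary cache and checks,
// via direct cache lookups, how entries move between the primary and the
// compressed secondary cache.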
  883. TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) {
  884. if (!Snappy_Supported()) {
  885. return;
  886. }
  887. secondary_cache_opts_.compression_type = kSnappyCompression;
  888. lru_cache_opts_.secondary_cache =
  889. NewCompressedSecondaryCache(secondary_cache_opts_);
  890. options_.blob_cache = NewLRUCache(lru_cache_opts_);
  891. options_.cf_paths.emplace_back(
  892. test::PerThreadDBPath(
  893. env_, "BlobSecondaryCacheTest_GetBlobsFromSecondaryCache"),
  894. 0);
  895. options_.statistics = CreateDBStatistics();
  896. Statistics* statistics = options_.statistics.get();
  897. assert(statistics);
  898. DestroyAndReopen(options_);
  899. ImmutableOptions immutable_options(options_);
  900. MutableCFOptions mutable_cf_options(options_);
  901. constexpr uint32_t column_family_id = 1;
  902. constexpr bool has_ttl = false;
  903. constexpr ExpirationRange expiration_range;
  904. constexpr uint64_t file_number = 1;
  905. Random rnd(301);
  906. std::vector<std::string> key_strs{"key0", "key1"};
  907. std::vector<std::string> blob_strs{rnd.RandomString(512),
  908. rnd.RandomString(768)};
  909. std::vector<Slice> keys{key_strs[0], key_strs[1]};
  910. std::vector<Slice> blobs{blob_strs[0], blob_strs[1]};
  911. std::vector<uint64_t> blob_offsets(keys.size());
  912. std::vector<uint64_t> blob_sizes(keys.size());
  913. WriteBlobFile(immutable_options, column_family_id, has_ttl, expiration_range,
  914. expiration_range, file_number, keys, blobs, kNoCompression,
  915. blob_offsets, blob_sizes);
  916. constexpr size_t capacity = 1024;
  917. std::shared_ptr<Cache> backing_cache = NewLRUCache(capacity);
  918. FileOptions file_options;
  919. constexpr HistogramImpl* blob_file_read_hist = nullptr;
  920. std::unique_ptr<BlobFileCache> blob_file_cache(new BlobFileCache(
  921. backing_cache.get(), &immutable_options, &file_options, column_family_id,
  922. blob_file_read_hist, nullptr /*IOTracer*/));
  923. BlobSource blob_source(immutable_options, mutable_cf_options, db_id_,
  924. db_session_id_, blob_file_cache.get());
  925. CacheHandleGuard<BlobFileReader> file_reader;
  926. ReadOptions read_options;
  927. ASSERT_OK(
  928. blob_source.GetBlobFileReader(read_options, file_number, &file_reader));
  929. ASSERT_NE(file_reader.GetValue(), nullptr);
  930. const uint64_t file_size = file_reader.GetValue()->GetFileSize();
  931. ASSERT_EQ(file_reader.GetValue()->GetCompressionType(), kNoCompression);
  932. read_options.verify_checksums = true;
  933. auto blob_cache = options_.blob_cache;
  934. auto secondary_cache = lru_cache_opts_.secondary_cache;
  935. {
  936. // GetBlob
  937. std::vector<PinnableSlice> values(keys.size());
  938. read_options.fill_cache = true;
  939. get_perf_context()->Reset();
  940. // key0 should be filled to the primary cache from the blob file.
  941. ASSERT_OK(blob_source.GetBlob(read_options, keys[0], file_number,
  942. blob_offsets[0], file_size, blob_sizes[0],
  943. kNoCompression, nullptr /* prefetch_buffer */,
  944. values.data(), nullptr /* bytes_read */));
  945. // Release cache handle
  946. values[0].Reset();
  947. // key0 should be evicted and key0's dummy item is inserted into secondary
  948. // cache. key1 should be filled to the primary cache from the blob file.
  949. ASSERT_OK(blob_source.GetBlob(read_options, keys[1], file_number,
  950. blob_offsets[1], file_size, blob_sizes[1],
  951. kNoCompression, nullptr /* prefetch_buffer */,
  952. &values[1], nullptr /* bytes_read */));
  953. // Release cache handle
  954. values[1].Reset();
  955. // key0 should be filled to the primary cache from the blob file. key1
  956. // should be evicted and key1's dummy item is inserted into secondary cache.
  957. ASSERT_OK(blob_source.GetBlob(read_options, keys[0], file_number,
  958. blob_offsets[0], file_size, blob_sizes[0],
  959. kNoCompression, nullptr /* prefetch_buffer */,
  960. values.data(), nullptr /* bytes_read */));
  961. ASSERT_EQ(values[0], blobs[0]);
  962. ASSERT_TRUE(
  963. blob_source.TEST_BlobInCache(file_number, file_size, blob_offsets[0]));
  964. // Release cache handle
  965. values[0].Reset();
  966. // key0 should be evicted and is inserted into secondary cache.
  967. // key1 should be filled to the primary cache from the blob file.
  968. ASSERT_OK(blob_source.GetBlob(read_options, keys[1], file_number,
  969. blob_offsets[1], file_size, blob_sizes[1],
  970. kNoCompression, nullptr /* prefetch_buffer */,
  971. &values[1], nullptr /* bytes_read */));
  972. ASSERT_EQ(values[1], blobs[1]);
  973. ASSERT_TRUE(
  974. blob_source.TEST_BlobInCache(file_number, file_size, blob_offsets[1]));
  975. // Release cache handle
  976. values[1].Reset();
  977. OffsetableCacheKey base_cache_key(db_id_, db_session_id_, file_number);
  978. // blob_cache here only looks at the primary cache since we didn't provide
  979. // the cache item helper for the secondary cache. However, since key0 is
  980. // demoted to the secondary cache, we shouldn't be able to find it in the
  981. // primary cache.
  982. {
  983. CacheKey cache_key = base_cache_key.WithOffset(blob_offsets[0]);
  984. const Slice key0 = cache_key.AsSlice();
  985. auto handle0 = blob_cache->BasicLookup(key0, statistics);
  986. ASSERT_EQ(handle0, nullptr);
  987. // key0's item should be in the secondary cache.
  988. bool kept_in_sec_cache = false;
  989. auto sec_handle0 = secondary_cache->Lookup(
  990. key0, BlobSource::SharedCacheInterface::GetFullHelper(),
  991. /*context*/ nullptr, true,
  992. /*advise_erase=*/true, /*stats=*/nullptr, kept_in_sec_cache);
  993. ASSERT_FALSE(kept_in_sec_cache);
  994. ASSERT_NE(sec_handle0, nullptr);
  995. ASSERT_TRUE(sec_handle0->IsReady());
  996. auto value = static_cast<BlobContents*>(sec_handle0->Value());
  997. ASSERT_NE(value, nullptr);
  998. ASSERT_EQ(value->data(), blobs[0]);
  999. delete value;
  1000. // key0 doesn't exist in the blob cache although key0's dummy
  1001. // item exist in the secondary cache.
  1002. ASSERT_FALSE(blob_source.TEST_BlobInCache(file_number, file_size,
  1003. blob_offsets[0]));
  1004. }
  1005. // key1 should exists in the primary cache. key1's dummy item exists
  1006. // in the secondary cache.
  1007. {
  1008. CacheKey cache_key = base_cache_key.WithOffset(blob_offsets[1]);
  1009. const Slice key1 = cache_key.AsSlice();
  1010. auto handle1 = blob_cache->BasicLookup(key1, statistics);
  1011. ASSERT_NE(handle1, nullptr);
  1012. blob_cache->Release(handle1);
  1013. bool kept_in_sec_cache = false;
  1014. auto sec_handle1 = secondary_cache->Lookup(
  1015. key1, BlobSource::SharedCacheInterface::GetFullHelper(),
  1016. /*context*/ nullptr, true,
  1017. /*advise_erase=*/true, /*stats=*/nullptr, kept_in_sec_cache);
  1018. ASSERT_FALSE(kept_in_sec_cache);
  1019. ASSERT_EQ(sec_handle1, nullptr);
  1020. ASSERT_TRUE(blob_source.TEST_BlobInCache(file_number, file_size,
  1021. blob_offsets[1]));
  1022. }
    {
      // Fetch key0 from the blob file to the primary cache.
      // key1 is evicted and inserted into the secondary cache.
      ASSERT_OK(blob_source.GetBlob(
          read_options, keys[0], file_number, blob_offsets[0], file_size,
          blob_sizes[0], kNoCompression, nullptr /* prefetch_buffer */,
          values.data(), nullptr /* bytes_read */));
      ASSERT_EQ(values[0], blobs[0]);

      // Release cache handle
      values[0].Reset();

      // key0 should be in the primary cache.
      CacheKey cache_key0 = base_cache_key.WithOffset(blob_offsets[0]);
      const Slice key0 = cache_key0.AsSlice();
      auto handle0 = blob_cache->BasicLookup(key0, statistics);
      ASSERT_NE(handle0, nullptr);
      auto value = static_cast<BlobContents*>(blob_cache->Value(handle0));
      ASSERT_NE(value, nullptr);
      ASSERT_EQ(value->data(), blobs[0]);
      blob_cache->Release(handle0);

      // key1 is not in the primary cache and is in the secondary cache.
      CacheKey cache_key1 = base_cache_key.WithOffset(blob_offsets[1]);
      const Slice key1 = cache_key1.AsSlice();
      auto handle1 = blob_cache->BasicLookup(key1, statistics);
      ASSERT_EQ(handle1, nullptr);

      // Erase key0 from the primary cache.
      blob_cache->Erase(key0);
      handle0 = blob_cache->BasicLookup(key0, statistics);
      ASSERT_EQ(handle0, nullptr);

      // key1 promotion should succeed because the primary cache is now empty.
      // We didn't call the secondary cache's Lookup() here, because it would
      // remove the key but would not be able to promote it to the primary
      // cache. Instead we use the end-to-end blob source API to read key1.
      // In function TEST_BlobInCache, key1's dummy item is inserted into the
      // primary cache and a standalone handle is checked by GetValue().
      ASSERT_TRUE(blob_source.TEST_BlobInCache(file_number, file_size,
                                               blob_offsets[1]));

      // key1's dummy handle is in the primary cache and key1's item is still
      // in the secondary cache. So the primary cache's Lookup() without
      // secondary cache support cannot see it. (NOTE: The dummy handle used
      // to be a leaky abstraction but is not anymore.)
      handle1 = blob_cache->BasicLookup(key1, statistics);
      ASSERT_EQ(handle1, nullptr);

      // But after another access, it is promoted to the primary cache.
      ASSERT_TRUE(blob_source.TEST_BlobInCache(file_number, file_size,
                                               blob_offsets[1]));

      // And Lookup() can find it (without secondary cache support).
      handle1 = blob_cache->BasicLookup(key1, statistics);
      ASSERT_NE(handle1, nullptr);
      ASSERT_NE(blob_cache->Value(handle1), nullptr);
      blob_cache->Release(handle1);
    }
  }
}
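
// BlobSourceCacheReservationTest exercises charging blob cache usage against
// the block cache: the fixture enables CacheEntryRole::kBlobCache charging
// via cache_usage_options, so the blob cache handed to BlobSource is expected
// to be wrapped in a ChargedCache whose reservation manager mirrors blob
// cache usage into block cache reservations (see the tests below).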
class BlobSourceCacheReservationTest : public DBTestBase {
 public:
  explicit BlobSourceCacheReservationTest()
      : DBTestBase("blob_source_cache_reservation_test",
                   /*env_do_fsync=*/true) {
    options_.env = env_;
    options_.enable_blob_files = true;
    options_.create_if_missing = true;

    LRUCacheOptions co;
    co.capacity = kCacheCapacity;
    co.num_shard_bits = kNumShardBits;
    co.metadata_charge_policy = kDontChargeCacheMetadata;

    co.high_pri_pool_ratio = 0.0;
    co.low_pri_pool_ratio = 0.0;
    std::shared_ptr<Cache> blob_cache = NewLRUCache(co);

    co.high_pri_pool_ratio = 0.5;
    co.low_pri_pool_ratio = 0.5;
    std::shared_ptr<Cache> block_cache = NewLRUCache(co);

    options_.blob_cache = blob_cache;
    options_.lowest_used_cache_tier = CacheTier::kVolatileTier;

    BlockBasedTableOptions block_based_options;
    block_based_options.no_block_cache = false;
    block_based_options.block_cache = block_cache;
    block_based_options.cache_usage_options.options_overrides.insert(
        {CacheEntryRole::kBlobCache,
         {/* charged = */ CacheEntryRoleOptions::Decision::kEnabled}});
    options_.table_factory.reset(
        NewBlockBasedTableFactory(block_based_options));

    assert(db_->GetDbIdentity(db_id_).ok());
    assert(db_->GetDbSessionId(db_session_id_).ok());
  }
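
  // Computes the expected blob file size by accumulating the blob log header,
  // one record header plus key and blob payload per blob, and the blob log
  // footer. The tests pass the result as the file size argument to GetBlob().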
  void GenerateKeysAndBlobs() {
    for (size_t i = 0; i < kNumBlobs; ++i) {
      key_strs_.push_back("key" + std::to_string(i));
      blob_strs_.push_back("blob" + std::to_string(i));
    }

    blob_file_size_ = BlobLogHeader::kSize;
    for (size_t i = 0; i < kNumBlobs; ++i) {
      keys_.emplace_back(key_strs_[i]);
      blobs_.emplace_back(blob_strs_[i]);
      blob_file_size_ +=
          BlobLogRecord::kHeaderSize + keys_[i].size() + blobs_[i].size();
    }
    blob_file_size_ += BlobLogFooter::kSize;
  }
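
  // The reservation manager reserves block cache space in increments of
  // kSizeDummyEntry. With a capacity of two dummy entries, the tests can
  // observe the reservation growing from one increment to two (see
  // IncreaseCacheReservation below).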
  static constexpr std::size_t kSizeDummyEntry = CacheReservationManagerImpl<
      CacheEntryRole::kBlobCache>::GetDummyEntrySize();
  static constexpr std::size_t kCacheCapacity = 2 * kSizeDummyEntry;
  static constexpr int kNumShardBits = 0;  // 2^0 shard
  static constexpr uint32_t kColumnFamilyId = 1;
  static constexpr bool kHasTTL = false;
  static constexpr uint64_t kBlobFileNumber = 1;
  static constexpr size_t kNumBlobs = 16;

  std::vector<Slice> keys_;
  std::vector<Slice> blobs_;
  std::vector<std::string> key_strs_;
  std::vector<std::string> blob_strs_;
  uint64_t blob_file_size_;

  Options options_;
  std::string db_id_;
  std::string db_session_id_;
};
TEST_F(BlobSourceCacheReservationTest, SimpleCacheReservation) {
  options_.cf_paths.emplace_back(
      test::PerThreadDBPath(
          env_, "BlobSourceCacheReservationTest_SimpleCacheReservation"),
      0);

  GenerateKeysAndBlobs();

  DestroyAndReopen(options_);

  ImmutableOptions immutable_options(options_);
  MutableCFOptions mutable_cf_options(options_);

  constexpr ExpirationRange expiration_range;

  std::vector<uint64_t> blob_offsets(keys_.size());
  std::vector<uint64_t> blob_sizes(keys_.size());

  WriteBlobFile(immutable_options, kColumnFamilyId, kHasTTL, expiration_range,
                expiration_range, kBlobFileNumber, keys_, blobs_,
                kNoCompression, blob_offsets, blob_sizes);

  constexpr size_t capacity = 10;
  std::shared_ptr<Cache> backing_cache = NewLRUCache(capacity);

  FileOptions file_options;
  constexpr HistogramImpl* blob_file_read_hist = nullptr;

  std::unique_ptr<BlobFileCache> blob_file_cache =
      std::make_unique<BlobFileCache>(
          backing_cache.get(), &immutable_options, &file_options,
          kColumnFamilyId, blob_file_read_hist, nullptr /*IOTracer*/);

  BlobSource blob_source(immutable_options, mutable_cf_options, db_id_,
                         db_session_id_, blob_file_cache.get());

  ConcurrentCacheReservationManager* cache_res_mgr =
      static_cast<ChargedCache*>(blob_source.GetBlobCache())
          ->TEST_GetCacheReservationManager();
  ASSERT_NE(cache_res_mgr, nullptr);
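
  // Since kBlobCache charging is enabled in the fixture, the blob cache used
  // by BlobSource is a ChargedCache wrapper (hence the cast above); its
  // ConcurrentCacheReservationManager is what the assertions below inspect.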

  ReadOptions read_options;
  read_options.verify_checksums = true;

  {
    read_options.fill_cache = false;

    std::vector<PinnableSlice> values(keys_.size());

    for (size_t i = 0; i < kNumBlobs; ++i) {
      ASSERT_OK(blob_source.GetBlob(
          read_options, keys_[i], kBlobFileNumber, blob_offsets[i],
          blob_file_size_, blob_sizes[i], kNoCompression,
          nullptr /* prefetch_buffer */, &values[i], nullptr /* bytes_read */));
      ASSERT_EQ(cache_res_mgr->GetTotalReservedCacheSize(), 0);
      ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(), 0);
    }
  }
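
  // With fill_cache=false, the blobs are read from the file without being
  // inserted into the blob cache, so no reservation and no memory usage are
  // expected (as asserted above). The next block repeats the reads with
  // fill_cache=true.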

  {
    read_options.fill_cache = true;

    std::vector<PinnableSlice> values(keys_.size());

    // kNumBlobs is 16, so the total blob cache usage is less than a single
    // dummy entry. Therefore, the cache reservation manager only reserves one
    // dummy entry here.
    uint64_t blob_bytes = 0;
    for (size_t i = 0; i < kNumBlobs; ++i) {
      ASSERT_OK(blob_source.GetBlob(
          read_options, keys_[i], kBlobFileNumber, blob_offsets[i],
          blob_file_size_, blob_sizes[i], kNoCompression,
          nullptr /* prefetch_buffer */, &values[i], nullptr /* bytes_read */));

      size_t charge = 0;
      ASSERT_TRUE(blob_source.TEST_BlobInCache(kBlobFileNumber, blob_file_size_,
                                               blob_offsets[i], &charge));

      blob_bytes += charge;
      ASSERT_EQ(cache_res_mgr->GetTotalReservedCacheSize(), kSizeDummyEntry);
      ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(), blob_bytes);
      ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(),
                options_.blob_cache->GetUsage());
    }
  }
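
  // Finally, erase the cached blobs one by one through the cache wrapper and
  // verify that both the tracked memory usage and the dummy entry reservation
  // shrink back to zero.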

  {
    OffsetableCacheKey base_cache_key(db_id_, db_session_id_, kBlobFileNumber);
    size_t blob_bytes = options_.blob_cache->GetUsage();

    for (size_t i = 0; i < kNumBlobs; ++i) {
      size_t charge = 0;
      ASSERT_TRUE(blob_source.TEST_BlobInCache(kBlobFileNumber, blob_file_size_,
                                               blob_offsets[i], &charge));

      CacheKey cache_key = base_cache_key.WithOffset(blob_offsets[i]);
      // We don't call options_.blob_cache->Erase() directly here; the cache
      // wrapper's Erase() method must be called instead so that the cache
      // usage is updated after the entry is erased.
      blob_source.GetBlobCache()->Erase(cache_key.AsSlice());

      if (i == kNumBlobs - 1) {
        // All the blobs got removed from the cache. cache_res_mgr should not
        // reserve any space for them.
        ASSERT_EQ(cache_res_mgr->GetTotalReservedCacheSize(), 0);
      } else {
        ASSERT_EQ(cache_res_mgr->GetTotalReservedCacheSize(), kSizeDummyEntry);
      }
      blob_bytes -= charge;
      ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(), blob_bytes);
      ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(),
                options_.blob_cache->GetUsage());
    }
  }
}

TEST_F(BlobSourceCacheReservationTest, IncreaseCacheReservation) {
  options_.cf_paths.emplace_back(
      test::PerThreadDBPath(
          env_, "BlobSourceCacheReservationTest_IncreaseCacheReservation"),
      0);

  GenerateKeysAndBlobs();

  DestroyAndReopen(options_);

  ImmutableOptions immutable_options(options_);
  MutableCFOptions mutable_cf_options(options_);

  constexpr size_t blob_size = 24 << 10;  // 24KB
  for (size_t i = 0; i < kNumBlobs; ++i) {
    blob_file_size_ -= blobs_[i].size();  // old blob size
    blob_strs_[i].resize(blob_size, '@');
    blobs_[i] = Slice(blob_strs_[i]);
    blob_file_size_ += blobs_[i].size();  // new blob size
  }
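
  // The blobs are enlarged to 24 KiB each so that the cumulative blob cache
  // charge grows large enough to exceed a single dummy entry, which should
  // force the reservation manager to bump the reservation from one dummy
  // entry to two (verified by the ternary expectation in the loop below).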

  std::vector<uint64_t> blob_offsets(keys_.size());
  std::vector<uint64_t> blob_sizes(keys_.size());

  constexpr ExpirationRange expiration_range;
  WriteBlobFile(immutable_options, kColumnFamilyId, kHasTTL, expiration_range,
                expiration_range, kBlobFileNumber, keys_, blobs_,
                kNoCompression, blob_offsets, blob_sizes);

  constexpr size_t capacity = 10;
  std::shared_ptr<Cache> backing_cache = NewLRUCache(capacity);

  FileOptions file_options;
  constexpr HistogramImpl* blob_file_read_hist = nullptr;

  std::unique_ptr<BlobFileCache> blob_file_cache =
      std::make_unique<BlobFileCache>(
          backing_cache.get(), &immutable_options, &file_options,
          kColumnFamilyId, blob_file_read_hist, nullptr /*IOTracer*/);

  BlobSource blob_source(immutable_options, mutable_cf_options, db_id_,
                         db_session_id_, blob_file_cache.get());

  ConcurrentCacheReservationManager* cache_res_mgr =
      static_cast<ChargedCache*>(blob_source.GetBlobCache())
          ->TEST_GetCacheReservationManager();
  ASSERT_NE(cache_res_mgr, nullptr);

  ReadOptions read_options;
  read_options.verify_checksums = true;

  {
    read_options.fill_cache = false;

    std::vector<PinnableSlice> values(keys_.size());

    for (size_t i = 0; i < kNumBlobs; ++i) {
      ASSERT_OK(blob_source.GetBlob(
          read_options, keys_[i], kBlobFileNumber, blob_offsets[i],
          blob_file_size_, blob_sizes[i], kNoCompression,
          nullptr /* prefetch_buffer */, &values[i], nullptr /* bytes_read */));
      ASSERT_EQ(cache_res_mgr->GetTotalReservedCacheSize(), 0);
      ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(), 0);
    }
  }

  {
    read_options.fill_cache = true;

    std::vector<PinnableSlice> values(keys_.size());

    uint64_t blob_bytes = 0;
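    // The loop below accumulates the per-blob cache charge in blob_bytes; the
    // reserved size is expected to stay at one dummy entry while blob_bytes
    // still fits within it and to double once it no longer does.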
    for (size_t i = 0; i < kNumBlobs; ++i) {
      ASSERT_OK(blob_source.GetBlob(
          read_options, keys_[i], kBlobFileNumber, blob_offsets[i],
          blob_file_size_, blob_sizes[i], kNoCompression,
          nullptr /* prefetch_buffer */, &values[i], nullptr /* bytes_read */));

      // Release cache handle
      values[i].Reset();

      size_t charge = 0;
      ASSERT_TRUE(blob_source.TEST_BlobInCache(kBlobFileNumber, blob_file_size_,
                                               blob_offsets[i], &charge));

      blob_bytes += charge;
      ASSERT_EQ(cache_res_mgr->GetTotalReservedCacheSize(),
                (blob_bytes <= kSizeDummyEntry) ? kSizeDummyEntry
                                                : (2 * kSizeDummyEntry));
      ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(), blob_bytes);
      ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(),
                options_.blob_cache->GetUsage());
    }
  }
}

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}