db_properties_test.cc 91 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419
  1. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
  2. // This source code is licensed under both the GPLv2 (found in the
  3. // COPYING file in the root directory) and Apache 2.0 License
  4. // (found in the LICENSE.Apache file in the root directory).
  5. //
  6. // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
  7. // Use of this source code is governed by a BSD-style license that can be
  8. // found in the LICENSE file. See the AUTHORS file for names of contributors.
  9. #include <algorithm>
  10. #include <cstdio>
  11. #include <string>
  12. #include "db/db_test_util.h"
  13. #include "db/write_stall_stats.h"
  14. #include "options/cf_options.h"
  15. #include "port/stack_trace.h"
  16. #include "rocksdb/listener.h"
  17. #include "rocksdb/options.h"
  18. #include "rocksdb/perf_context.h"
  19. #include "rocksdb/perf_level.h"
  20. #include "rocksdb/table.h"
  21. #include "table/block_based/block.h"
  22. #include "table/format.h"
  23. #include "table/meta_blocks.h"
  24. #include "table/table_builder.h"
  25. #include "test_util/mock_time_env.h"
  26. #include "util/random.h"
  27. #include "util/string_util.h"
  28. namespace ROCKSDB_NAMESPACE {
  29. class DBPropertiesTest : public DBTestBase {
  30. public:
  31. DBPropertiesTest()
  32. : DBTestBase("db_properties_test", /*env_do_fsync=*/false) {}
  33. void AssertDbStats(const std::map<std::string, std::string>& db_stats,
  34. double expected_uptime, int expected_user_bytes_written,
  35. int expected_wal_bytes_written,
  36. int expected_user_writes_by_self,
  37. int expected_user_writes_with_wal) {
  38. ASSERT_EQ(std::to_string(expected_uptime), db_stats.at("db.uptime"));
  39. ASSERT_EQ(std::to_string(expected_wal_bytes_written),
  40. db_stats.at("db.wal_bytes_written"));
  41. ASSERT_EQ("0", db_stats.at("db.wal_syncs"));
  42. ASSERT_EQ(std::to_string(expected_user_bytes_written),
  43. db_stats.at("db.user_bytes_written"));
  44. ASSERT_EQ("0", db_stats.at("db.user_writes_by_other"));
  45. ASSERT_EQ(std::to_string(expected_user_writes_by_self),
  46. db_stats.at("db.user_writes_by_self"));
  47. ASSERT_EQ(std::to_string(expected_user_writes_with_wal),
  48. db_stats.at("db.user_writes_with_wal"));
  49. ASSERT_EQ("0", db_stats.at("db.user_write_stall_micros"));
  50. }
  51. };
  52. TEST_F(DBPropertiesTest, Empty) {
  53. do {
  54. Options options;
  55. options.env = env_;
  56. options.write_buffer_size = 100000; // Small write buffer
  57. options.allow_concurrent_memtable_write = false;
  58. options = CurrentOptions(options);
  59. CreateAndReopenWithCF({"pikachu"}, options);
  60. std::string num;
  61. ASSERT_TRUE(dbfull()->GetProperty(
  62. handles_[1], "rocksdb.num-entries-active-mem-table", &num));
  63. ASSERT_EQ("0", num);
  64. ASSERT_OK(Put(1, "foo", "v1"));
  65. ASSERT_EQ("v1", Get(1, "foo"));
  66. ASSERT_TRUE(dbfull()->GetProperty(
  67. handles_[1], "rocksdb.num-entries-active-mem-table", &num));
  68. ASSERT_EQ("1", num);
  69. // Block sync calls
  70. env_->delay_sstable_sync_.store(true, std::memory_order_release);
  71. ASSERT_OK(Put(1, "k1", std::string(100000, 'x'))); // Fill memtable
  72. ASSERT_TRUE(dbfull()->GetProperty(
  73. handles_[1], "rocksdb.num-entries-active-mem-table", &num));
  74. ASSERT_EQ("2", num);
  75. ASSERT_OK(Put(1, "k2", std::string(100000, 'y'))); // Trigger compaction
  76. ASSERT_TRUE(dbfull()->GetProperty(
  77. handles_[1], "rocksdb.num-entries-active-mem-table", &num));
  78. ASSERT_EQ("1", num);
  79. ASSERT_EQ("v1", Get(1, "foo"));
  80. // Release sync calls
  81. env_->delay_sstable_sync_.store(false, std::memory_order_release);
  82. ASSERT_OK(db_->DisableFileDeletions());
  83. ASSERT_TRUE(
  84. dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
  85. ASSERT_EQ("0", num);
  86. ASSERT_OK(db_->DisableFileDeletions());
  87. ASSERT_TRUE(
  88. dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
  89. ASSERT_EQ("0", num);
  90. ASSERT_OK(db_->DisableFileDeletions());
  91. ASSERT_TRUE(
  92. dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
  93. ASSERT_EQ("0", num);
  94. ASSERT_OK(db_->EnableFileDeletions());
  95. ASSERT_TRUE(
  96. dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
  97. ASSERT_EQ("0", num);
  98. ASSERT_OK(db_->EnableFileDeletions());
  99. ASSERT_TRUE(
  100. dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
  101. ASSERT_EQ("0", num);
  102. // File deletion enabled after `EnableFileDeletions` called as many times
  103. // as `DisableFileDeletions`.
  104. ASSERT_OK(db_->EnableFileDeletions());
  105. ASSERT_TRUE(
  106. dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
  107. ASSERT_EQ("1", num);
  108. } while (ChangeOptions());
  109. }
  110. TEST_F(DBPropertiesTest, CurrentVersionNumber) {
  111. uint64_t v1, v2, v3;
  112. ASSERT_TRUE(
  113. dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v1));
  114. ASSERT_OK(Put("12345678", ""));
  115. ASSERT_TRUE(
  116. dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v2));
  117. ASSERT_OK(Flush());
  118. ASSERT_TRUE(
  119. dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v3));
  120. ASSERT_EQ(v1, v2);
  121. ASSERT_GT(v3, v2);
  122. }
  123. TEST_F(DBPropertiesTest, GetAggregatedIntPropertyTest) {
  124. const int kKeySize = 100;
  125. const int kValueSize = 500;
  126. const int kKeyNum = 100;
  127. Options options;
  128. options.env = env_;
  129. options.create_if_missing = true;
  130. options.write_buffer_size = (kKeySize + kValueSize) * kKeyNum / 10;
  131. // Make them never flush
  132. options.min_write_buffer_number_to_merge = 1000;
  133. options.max_write_buffer_number = 1000;
  134. options = CurrentOptions(options);
  135. CreateAndReopenWithCF({"one", "two", "three", "four"}, options);
  136. Random rnd(301);
  137. for (auto* handle : handles_) {
  138. for (int i = 0; i < kKeyNum; ++i) {
  139. ASSERT_OK(db_->Put(WriteOptions(), handle, rnd.RandomString(kKeySize),
  140. rnd.RandomString(kValueSize)));
  141. }
  142. }
  143. uint64_t manual_sum = 0;
  144. uint64_t api_sum = 0;
  145. uint64_t value = 0;
  146. for (auto* handle : handles_) {
  147. ASSERT_TRUE(
  148. db_->GetIntProperty(handle, DB::Properties::kSizeAllMemTables, &value));
  149. manual_sum += value;
  150. }
  151. ASSERT_TRUE(db_->GetAggregatedIntProperty(DB::Properties::kSizeAllMemTables,
  152. &api_sum));
  153. ASSERT_GT(manual_sum, 0);
  154. ASSERT_EQ(manual_sum, api_sum);
  155. ASSERT_FALSE(db_->GetAggregatedIntProperty(DB::Properties::kDBStats, &value));
  156. uint64_t before_flush_trm;
  157. uint64_t after_flush_trm;
  158. for (auto* handle : handles_) {
  159. ASSERT_TRUE(db_->GetAggregatedIntProperty(
  160. DB::Properties::kEstimateTableReadersMem, &before_flush_trm));
  161. // Issue flush and expect larger memory usage of table readers.
  162. ASSERT_OK(db_->Flush(FlushOptions(), handle));
  163. ASSERT_TRUE(db_->GetAggregatedIntProperty(
  164. DB::Properties::kEstimateTableReadersMem, &after_flush_trm));
  165. ASSERT_GT(after_flush_trm, before_flush_trm);
  166. }
  167. }
  168. TEST_F(DBPropertiesTest, AggregateBlockCacheProperty) {
  169. constexpr size_t kCapacity = 1000;
  170. LRUCacheOptions co;
  171. co.capacity = kCapacity;
  172. co.num_shard_bits = 0;
  173. co.metadata_charge_policy = kDontChargeCacheMetadata;
  174. auto block_cache = NewLRUCache(co);
  175. // All columns families share the same block cache.
  176. Options options = CurrentOptions();
  177. BlockBasedTableOptions table_opt;
  178. table_opt.no_block_cache = false;
  179. table_opt.block_cache = block_cache;
  180. options.table_factory.reset(NewBlockBasedTableFactory(table_opt));
  181. CreateAndReopenWithCF({"one", "two", "three", "four"}, options);
  182. // Insert unpinned block to the cache
  183. constexpr size_t kSize1 = 100;
  184. ASSERT_OK(block_cache->Insert("block1", nullptr /*value*/,
  185. &kNoopCacheItemHelper, kSize1));
  186. // Insert pinned block to the cache
  187. constexpr size_t kSize2 = 200;
  188. Cache::Handle* block2 = nullptr;
  189. ASSERT_OK(block_cache->Insert("block2", nullptr /*value*/,
  190. &kNoopCacheItemHelper, kSize2, &block2));
  191. uint64_t value;
  192. ASSERT_TRUE(db_->GetAggregatedIntProperty(DB::Properties::kBlockCacheCapacity,
  193. &value));
  194. ASSERT_EQ(value, kCapacity);
  195. ASSERT_TRUE(
  196. db_->GetAggregatedIntProperty(DB::Properties::kBlockCacheUsage, &value));
  197. ASSERT_EQ(value, kSize1 + kSize2);
  198. ASSERT_TRUE(db_->GetAggregatedIntProperty(
  199. DB::Properties::kBlockCachePinnedUsage, &value));
  200. ASSERT_EQ(value, kSize2);
  201. block_cache->Release(block2);
  202. }
  203. namespace {
  204. void VerifySimilar(uint64_t a, uint64_t b, double bias) {
  205. ASSERT_EQ(a == 0U, b == 0U);
  206. if (a == 0) {
  207. return;
  208. }
  209. double dbl_a = static_cast<double>(a);
  210. double dbl_b = static_cast<double>(b);
  211. if (dbl_a > dbl_b) {
  212. ASSERT_LT(static_cast<double>(dbl_a - dbl_b) / (dbl_a + dbl_b), bias);
  213. } else {
  214. ASSERT_LT(static_cast<double>(dbl_b - dbl_a) / (dbl_a + dbl_b), bias);
  215. }
  216. }
// Compares `new_tp` against the expected `base_tp`: the size-like fields
// must agree within the given relative biases (see VerifySimilar), while
// the exact counters must match precisely. The default filter-size bias is
// looser when CACHE_LINE_SIZE >= 256 because the expected filter size
// (see GetExpectedTableProperties) includes a CACHE_LINE_SIZE-dependent
// overhead term that is only "average-ish".
void VerifyTableProperties(
    const TableProperties& base_tp, const TableProperties& new_tp,
    double filter_size_bias = CACHE_LINE_SIZE >= 256 ? 0.18 : 0.1,
    double index_size_bias = 0.1, double data_size_bias = 0.1,
    double num_data_blocks_bias = 0.05) {
  // Approximate fields: verified within a relative tolerance.
  VerifySimilar(base_tp.data_size, new_tp.data_size, data_size_bias);
  VerifySimilar(base_tp.index_size, new_tp.index_size, index_size_bias);
  VerifySimilar(base_tp.filter_size, new_tp.filter_size, filter_size_bias);
  VerifySimilar(base_tp.num_data_blocks, new_tp.num_data_blocks,
                num_data_blocks_bias);

  // Exact fields: raw sizes and entry counts must match to the byte/key.
  ASSERT_EQ(base_tp.raw_key_size, new_tp.raw_key_size);
  ASSERT_EQ(base_tp.raw_value_size, new_tp.raw_value_size);
  ASSERT_EQ(base_tp.num_entries, new_tp.num_entries);
  ASSERT_EQ(base_tp.num_deletions, new_tp.num_deletions);
  ASSERT_EQ(base_tp.num_range_deletions, new_tp.num_range_deletions);
  // Merge operands may become Puts, so we only have an upper bound the exact
  // number of merge operands.
  ASSERT_GE(base_tp.num_merge_operands, new_tp.num_merge_operands);
}
  236. void GetExpectedTableProperties(
  237. TableProperties* expected_tp, const int kKeySize, const int kValueSize,
  238. const int kPutsPerTable, const int kDeletionsPerTable,
  239. const int kMergeOperandsPerTable, const int kRangeDeletionsPerTable,
  240. const int kTableCount, const int kBloomBitsPerKey, const size_t kBlockSize,
  241. const bool index_key_is_user_key, const bool value_delta_encoding) {
  242. const int kKeysPerTable =
  243. kPutsPerTable + kDeletionsPerTable + kMergeOperandsPerTable;
  244. const int kPutCount = kTableCount * kPutsPerTable;
  245. const int kDeletionCount = kTableCount * kDeletionsPerTable;
  246. const int kMergeCount = kTableCount * kMergeOperandsPerTable;
  247. const int kRangeDeletionCount = kTableCount * kRangeDeletionsPerTable;
  248. const int kKeyCount =
  249. kPutCount + kDeletionCount + kMergeCount + kRangeDeletionCount;
  250. const int kAvgSuccessorSize = kKeySize / 5;
  251. const int kEncodingSavePerKey = kKeySize / 4;
  252. expected_tp->raw_key_size = kKeyCount * (kKeySize + 8);
  253. expected_tp->raw_value_size =
  254. (kPutCount + kMergeCount + kRangeDeletionCount) * kValueSize;
  255. expected_tp->num_entries = kKeyCount;
  256. expected_tp->num_deletions = kDeletionCount + kRangeDeletionCount;
  257. expected_tp->num_merge_operands = kMergeCount;
  258. expected_tp->num_range_deletions = kRangeDeletionCount;
  259. expected_tp->num_data_blocks =
  260. kTableCount *
  261. (kKeysPerTable * (kKeySize - kEncodingSavePerKey + kValueSize)) /
  262. kBlockSize;
  263. expected_tp->data_size =
  264. kTableCount * (kKeysPerTable * (kKeySize + 8 + kValueSize));
  265. expected_tp->index_size =
  266. expected_tp->num_data_blocks *
  267. (kAvgSuccessorSize + (index_key_is_user_key ? 0 : 8) -
  268. // discount 1 byte as value size is not encoded in value delta encoding
  269. (value_delta_encoding ? 1 : 0));
  270. expected_tp->filter_size =
  271. kTableCount * ((kKeysPerTable * kBloomBitsPerKey + 7) / 8 +
  272. /*average-ish overhead*/ CACHE_LINE_SIZE / 2);
  273. }
  274. } // anonymous namespace
  275. TEST_F(DBPropertiesTest, ValidatePropertyInfo) {
  276. for (const auto& ppt_name_and_info : InternalStats::ppt_name_to_info) {
  277. // If C++ gets a std::string_literal, this would be better to check at
  278. // compile-time using static_assert.
  279. ASSERT_TRUE(ppt_name_and_info.first.empty() ||
  280. !isdigit(ppt_name_and_info.first.back()));
  281. int count = 0;
  282. count += (ppt_name_and_info.second.handle_string == nullptr) ? 0 : 1;
  283. count += (ppt_name_and_info.second.handle_int == nullptr) ? 0 : 1;
  284. count += (ppt_name_and_info.second.handle_string_dbimpl == nullptr) ? 0 : 1;
  285. ASSERT_TRUE(count == 1);
  286. }
  287. }
  288. TEST_F(DBPropertiesTest, ValidateSampleNumber) {
  289. // When "max_open_files" is -1, we read all the files for
  290. // "rocksdb.estimate-num-keys" computation, which is the ground truth.
  291. // Otherwise, we sample 20 newest files to make an estimation.
  292. // Formula: lastest_20_files_active_key_ratio * total_files
  293. Options options = CurrentOptions();
  294. options.disable_auto_compactions = true;
  295. options.level0_stop_writes_trigger = 1000;
  296. DestroyAndReopen(options);
  297. int key = 0;
  298. for (int files = 20; files >= 10; files -= 10) {
  299. for (int i = 0; i < files; i++) {
  300. int rows = files / 10;
  301. for (int j = 0; j < rows; j++) {
  302. ASSERT_OK(db_->Put(WriteOptions(), std::to_string(++key), "foo"));
  303. }
  304. ASSERT_OK(db_->Flush(FlushOptions()));
  305. }
  306. }
  307. std::string num;
  308. Reopen(options);
  309. ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
  310. ASSERT_EQ("45", num);
  311. options.max_open_files = -1;
  312. Reopen(options);
  313. ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
  314. ASSERT_EQ("50", num);
  315. }
  316. TEST_F(DBPropertiesTest, AggregatedTableProperties) {
  317. for (int kTableCount = 40; kTableCount <= 100; kTableCount += 30) {
  318. const int kDeletionsPerTable = 0;
  319. const int kMergeOperandsPerTable = 15;
  320. const int kRangeDeletionsPerTable = 5;
  321. const int kPutsPerTable = 100;
  322. const int kKeySize = 80;
  323. const int kValueSize = 200;
  324. const int kBloomBitsPerKey = 20;
  325. Options options = CurrentOptions();
  326. options.level0_file_num_compaction_trigger = 8;
  327. options.compression = kNoCompression;
  328. options.create_if_missing = true;
  329. options.merge_operator.reset(new TestPutOperator());
  330. BlockBasedTableOptions table_options;
  331. table_options.filter_policy.reset(
  332. NewBloomFilterPolicy(kBloomBitsPerKey, false));
  333. table_options.block_size = 1024;
  334. options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  335. // The checks assume kTableCount number of files
  336. options.disable_auto_compactions = true;
  337. DestroyAndReopen(options);
  338. // Hold open a snapshot to prevent range tombstones from being compacted
  339. // away.
  340. ManagedSnapshot snapshot(db_);
  341. Random rnd(5632);
  342. for (int table = 1; table <= kTableCount; ++table) {
  343. for (int i = 0; i < kPutsPerTable; ++i) {
  344. ASSERT_OK(db_->Put(WriteOptions(), rnd.RandomString(kKeySize),
  345. rnd.RandomString(kValueSize)));
  346. }
  347. for (int i = 0; i < kDeletionsPerTable; i++) {
  348. ASSERT_OK(db_->Delete(WriteOptions(), rnd.RandomString(kKeySize)));
  349. }
  350. for (int i = 0; i < kMergeOperandsPerTable; i++) {
  351. ASSERT_OK(db_->Merge(WriteOptions(), rnd.RandomString(kKeySize),
  352. rnd.RandomString(kValueSize)));
  353. }
  354. for (int i = 0; i < kRangeDeletionsPerTable; i++) {
  355. std::string start = rnd.RandomString(kKeySize);
  356. std::string end = start;
  357. end.resize(kValueSize);
  358. ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
  359. start, end));
  360. }
  361. ASSERT_OK(db_->Flush(FlushOptions()));
  362. }
  363. std::string property;
  364. db_->GetProperty(DB::Properties::kAggregatedTableProperties, &property);
  365. TableProperties output_tp;
  366. ParseTablePropertiesString(property, &output_tp);
  367. bool index_key_is_user_key = output_tp.index_key_is_user_key > 0;
  368. bool value_is_delta_encoded = output_tp.index_value_is_delta_encoded > 0;
  369. TableProperties expected_tp;
  370. GetExpectedTableProperties(
  371. &expected_tp, kKeySize, kValueSize, kPutsPerTable, kDeletionsPerTable,
  372. kMergeOperandsPerTable, kRangeDeletionsPerTable, kTableCount,
  373. kBloomBitsPerKey, table_options.block_size, index_key_is_user_key,
  374. value_is_delta_encoded);
  375. VerifyTableProperties(expected_tp, output_tp);
  376. }
  377. }
// Verifies per-level read latency histograms in the "rocksdb.cfstats" and
// "rocksdb.cf-file-histogram" properties: histograms appear only for levels
// that have actually been read, are tracked per column family, reset on
// reopen, survive table-reader preloading, and are cleared by ResetStats().
TEST_F(DBPropertiesTest, ReadLatencyHistogramByLevel) {
  Options options = CurrentOptions();
  options.write_buffer_size = 110 << 10;
  options.level0_file_num_compaction_trigger = 6;
  options.num_levels = 4;
  options.compression = kNoCompression;
  options.max_bytes_for_level_base = 4500 << 10;
  options.target_file_size_base = 98 << 10;
  options.max_write_buffer_number = 2;
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
  options.max_open_files = 11;  // Make sure no preloading of table readers

  // RocksDB sanitizes max_open_files to at least 20; the sync point forces
  // it back to 11 after sanitization so preloading stays disabled.
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "SanitizeOptions::AfterChangeMaxOpenFiles", [&](void* arg) {
        int* max_open_files = static_cast<int*>(arg);
        *max_open_files = 11;
      });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();

  // NOTE(review): table_options is configured here but never installed via
  // options.table_factory, so no_block_cache = true has no effect on the
  // DB opened below -- confirm whether this is intentional.
  BlockBasedTableOptions table_options;
  table_options.no_block_cache = true;

  CreateAndReopenWithCF({"pikachu"}, options);
  int key_index = 0;
  Random rnd(301);
  // Generate files spread across levels so multiple levels can be read.
  for (int num = 0; num < 8; num++) {
    ASSERT_OK(Put("foo", "bar"));
    GenerateNewFile(&rnd, &key_index);
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
  ASSERT_OK(dbfull()->TEST_WaitForCompact());

  std::string prop;
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop));

  // Get() after flushes, See latency histogram tracked.
  for (int key = 0; key < key_index; key++) {
    Get(Key(key));
  }
  // Levels 0 and 1 were read; level 2 was not, so no histogram for it.
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cfstats", &prop));
  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));

  // Reopen and issue Get(). See the latency tracked.
  ReopenWithColumnFamilies({"default", "pikachu"}, options);
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  for (int key = 0; key < key_index; key++) {
    Get(Key(key));
  }

  // Test for getting immutable_db_options_.statistics
  ASSERT_TRUE(dbfull()->GetProperty(dbfull()->DefaultColumnFamily(),
                                    "rocksdb.options-statistics", &prop));
  ASSERT_NE(std::string::npos, prop.find("rocksdb.block.cache.miss"));
  // A made-up ticker name must not appear in the statistics dump.
  ASSERT_EQ(std::string::npos, prop.find("rocksdb.db.f.micros"));

  ASSERT_TRUE(dbfull()->GetProperty(dbfull()->DefaultColumnFamily(),
                                    "rocksdb.cf-file-histogram", &prop));
  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));

  // Reopen and issue iterating. See the latency tracked.
  ReopenWithColumnFamilies({"default", "pikachu"}, options);
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
  // Immediately after reopen, before any reads, no histograms exist.
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cf-file-histogram", &prop));
  ASSERT_EQ(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
  {
    // A full scan reads files on levels 0 and 1.
    std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
    for (iter->Seek(Key(0)); iter->Valid(); iter->Next()) {
    }
    ASSERT_OK(iter->status());
  }
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cf-file-histogram", &prop));
  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));

  // CF 1 should show no histogram.
  ASSERT_TRUE(
      dbfull()->GetProperty(handles_[1], "rocksdb.cf-file-histogram", &prop));
  ASSERT_EQ(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
  // put something and read it back , CF 1 should show histogram.
  ASSERT_OK(Put(1, "foo", "bar"));
  ASSERT_OK(Flush(1));
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ("bar", Get(1, "foo"));

  ASSERT_TRUE(
      dbfull()->GetProperty(handles_[1], "rocksdb.cf-file-histogram", &prop));
  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));

  // options.max_open_files preloads table readers.
  options.max_open_files = -1;
  ReopenWithColumnFamilies({"default", "pikachu"}, options);
  // Preloading alone already populates the level 0/1 histograms.
  ASSERT_TRUE(dbfull()->GetProperty(dbfull()->DefaultColumnFamily(),
                                    "rocksdb.cf-file-histogram", &prop));
  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
  for (int key = 0; key < key_index; key++) {
    Get(Key(key));
  }
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cfstats", &prop));
  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));

  // Clear internal stats
  ASSERT_OK(dbfull()->ResetStats());
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cfstats", &prop));
  ASSERT_EQ(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
}
  488. TEST_F(DBPropertiesTest, AggregatedTablePropertiesAtLevel) {
  489. const int kTableCount = 100;
  490. const int kDeletionsPerTable = 0;
  491. const int kMergeOperandsPerTable = 2;
  492. const int kRangeDeletionsPerTable = 2;
  493. const int kPutsPerTable = 10;
  494. const int kKeySize = 50;
  495. const int kValueSize = 400;
  496. const int kMaxLevel = 7;
  497. const int kBloomBitsPerKey = 20;
  498. Random rnd(301);
  499. Options options = CurrentOptions();
  500. options.level0_file_num_compaction_trigger = 8;
  501. options.compression = kNoCompression;
  502. options.create_if_missing = true;
  503. options.level0_file_num_compaction_trigger = 2;
  504. options.target_file_size_base = 8192;
  505. options.max_bytes_for_level_base = 10000;
  506. options.max_bytes_for_level_multiplier = 2;
  507. // The checks assume kTableCount number of files
  508. options.disable_auto_compactions = true;
  509. options.merge_operator.reset(new TestPutOperator());
  510. BlockBasedTableOptions table_options;
  511. table_options.filter_policy.reset(
  512. NewBloomFilterPolicy(kBloomBitsPerKey, false));
  513. table_options.block_size = 1024;
  514. options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  515. DestroyAndReopen(options);
  516. // Hold open a snapshot to prevent range tombstones from being compacted away.
  517. ManagedSnapshot snapshot(db_);
  518. std::string level_tp_strings[kMaxLevel];
  519. std::string tp_string;
  520. TableProperties level_tps[kMaxLevel];
  521. TableProperties tp, sum_tp, expected_tp;
  522. for (int table = 1; table <= kTableCount; ++table) {
  523. for (int i = 0; i < kPutsPerTable; ++i) {
  524. ASSERT_OK(db_->Put(WriteOptions(), rnd.RandomString(kKeySize),
  525. rnd.RandomString(kValueSize)));
  526. }
  527. for (int i = 0; i < kDeletionsPerTable; i++) {
  528. ASSERT_OK(db_->Delete(WriteOptions(), rnd.RandomString(kKeySize)));
  529. }
  530. for (int i = 0; i < kMergeOperandsPerTable; i++) {
  531. ASSERT_OK(db_->Merge(WriteOptions(), rnd.RandomString(kKeySize),
  532. rnd.RandomString(kValueSize)));
  533. }
  534. for (int i = 0; i < kRangeDeletionsPerTable; i++) {
  535. std::string start = rnd.RandomString(kKeySize);
  536. std::string end = start;
  537. end.resize(kValueSize);
  538. ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
  539. start, end));
  540. }
  541. ASSERT_OK(db_->Flush(FlushOptions()));
  542. ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  543. ResetTableProperties(&sum_tp);
  544. for (int level = 0; level < kMaxLevel; ++level) {
  545. db_->GetProperty(DB::Properties::kAggregatedTablePropertiesAtLevel +
  546. std::to_string(level),
  547. &level_tp_strings[level]);
  548. ParseTablePropertiesString(level_tp_strings[level], &level_tps[level]);
  549. sum_tp.data_size += level_tps[level].data_size;
  550. sum_tp.index_size += level_tps[level].index_size;
  551. sum_tp.filter_size += level_tps[level].filter_size;
  552. sum_tp.raw_key_size += level_tps[level].raw_key_size;
  553. sum_tp.raw_value_size += level_tps[level].raw_value_size;
  554. sum_tp.num_data_blocks += level_tps[level].num_data_blocks;
  555. sum_tp.num_entries += level_tps[level].num_entries;
  556. sum_tp.num_deletions += level_tps[level].num_deletions;
  557. sum_tp.num_merge_operands += level_tps[level].num_merge_operands;
  558. sum_tp.num_range_deletions += level_tps[level].num_range_deletions;
  559. }
  560. db_->GetProperty(DB::Properties::kAggregatedTableProperties, &tp_string);
  561. ParseTablePropertiesString(tp_string, &tp);
  562. bool index_key_is_user_key = tp.index_key_is_user_key > 0;
  563. bool value_is_delta_encoded = tp.index_value_is_delta_encoded > 0;
  564. ASSERT_EQ(sum_tp.data_size, tp.data_size);
  565. ASSERT_EQ(sum_tp.index_size, tp.index_size);
  566. ASSERT_EQ(sum_tp.filter_size, tp.filter_size);
  567. ASSERT_EQ(sum_tp.raw_key_size, tp.raw_key_size);
  568. ASSERT_EQ(sum_tp.raw_value_size, tp.raw_value_size);
  569. ASSERT_EQ(sum_tp.num_data_blocks, tp.num_data_blocks);
  570. ASSERT_EQ(sum_tp.num_entries, tp.num_entries);
  571. ASSERT_EQ(sum_tp.num_deletions, tp.num_deletions);
  572. ASSERT_EQ(sum_tp.num_merge_operands, tp.num_merge_operands);
  573. ASSERT_EQ(sum_tp.num_range_deletions, tp.num_range_deletions);
  574. if (table > 3) {
  575. GetExpectedTableProperties(
  576. &expected_tp, kKeySize, kValueSize, kPutsPerTable, kDeletionsPerTable,
  577. kMergeOperandsPerTable, kRangeDeletionsPerTable, table,
  578. kBloomBitsPerKey, table_options.block_size, index_key_is_user_key,
  579. value_is_delta_encoded);
  580. // Gives larger bias here as index block size, filter block size,
  581. // and data block size become much harder to estimate in this test.
  582. VerifyTableProperties(expected_tp, tp, CACHE_LINE_SIZE >= 256 ? 0.6 : 0.5,
  583. 0.5, 0.5, 0.25);
  584. }
  585. }
  586. }
// Exercises memtable-related properties (num-immutable-mem-table,
// num-entries-active-mem-table, num-entries-imm-mem-tables, num-deletes-*,
// estimate-num-keys) as oversized writes force memtable switches, and
// cross-checks reads against perf-context memtable-lookup counters.
TEST_F(DBPropertiesTest, NumImmutableMemTable) {
  do {
    Options options = CurrentOptions();
    WriteOptions writeOpt = WriteOptions();
    writeOpt.disableWAL = true;
    // Allow several memtables and delay merging so immutable memtables
    // accumulate instead of being flushed immediately.
    options.max_write_buffer_number = 4;
    options.min_write_buffer_number_to_merge = 3;
    options.write_buffer_size = 1000000;
    options.max_write_buffer_size_to_maintain =
        5 * static_cast<int64_t>(options.write_buffer_size);
    CreateAndReopenWithCF({"pikachu"}, options);
    // Each value is 2x the write buffer size, so every Put seals the current
    // memtable (asserted below via num-immutable-mem-table going 0 -> 1 -> 2).
    std::string big_value(1000000 * 2, 'x');
    std::string num;
    uint64_t value;
    SetPerfLevel(kEnableTime);
    ASSERT_TRUE(GetPerfLevel() == kEnableTime);
    // First put: still only the active memtable.
    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k1", big_value));
    ASSERT_TRUE(dbfull()->GetProperty(handles_[1],
                                      "rocksdb.num-immutable-mem-table", &num));
    ASSERT_EQ(num, "0");
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], DB::Properties::kNumImmutableMemTableFlushed, &num));
    ASSERT_EQ(num, "0");
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
    ASSERT_EQ(num, "1");
    // k1 is found after probing one memtable.
    get_perf_context()->Reset();
    Get(1, "k1");
    ASSERT_EQ(1, static_cast<int>(get_perf_context()->get_from_memtable_count));
    // Second put: k1's memtable becomes immutable.
    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k2", big_value));
    ASSERT_TRUE(dbfull()->GetProperty(handles_[1],
                                      "rocksdb.num-immutable-mem-table", &num));
    ASSERT_EQ(num, "1");
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
    ASSERT_EQ(num, "1");
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.num-entries-imm-mem-tables", &num));
    ASSERT_EQ(num, "1");
    // get_from_memtable_count reflects how many memtables are probed before
    // the key is found: k1 now needs 2 probes, k2 only 1.
    get_perf_context()->Reset();
    Get(1, "k1");
    ASSERT_EQ(2, static_cast<int>(get_perf_context()->get_from_memtable_count));
    get_perf_context()->Reset();
    Get(1, "k2");
    ASSERT_EQ(1, static_cast<int>(get_perf_context()->get_from_memtable_count));
    // Third put: two immutable memtables now.
    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k3", big_value));
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.cur-size-active-mem-table", &num));
    ASSERT_TRUE(dbfull()->GetProperty(handles_[1],
                                      "rocksdb.num-immutable-mem-table", &num));
    ASSERT_EQ(num, "2");
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
    ASSERT_EQ(num, "1");
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.num-entries-imm-mem-tables", &num));
    ASSERT_EQ(num, "2");
    get_perf_context()->Reset();
    Get(1, "k2");
    ASSERT_EQ(2, static_cast<int>(get_perf_context()->get_from_memtable_count));
    get_perf_context()->Reset();
    Get(1, "k3");
    ASSERT_EQ(1, static_cast<int>(get_perf_context()->get_from_memtable_count));
    get_perf_context()->Reset();
    Get(1, "k1");
    ASSERT_EQ(3, static_cast<int>(get_perf_context()->get_from_memtable_count));
    // Flushing drains all immutable memtables; the flushed count becomes 3.
    ASSERT_OK(Flush(1));
    ASSERT_TRUE(dbfull()->GetProperty(handles_[1],
                                      "rocksdb.num-immutable-mem-table", &num));
    ASSERT_EQ(num, "0");
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], DB::Properties::kNumImmutableMemTableFlushed, &num));
    ASSERT_EQ(num, "3");
    ASSERT_TRUE(dbfull()->GetIntProperty(
        handles_[1], "rocksdb.cur-size-active-mem-table", &value));
    // "192" is the size of the metadata of two empty skiplists, this would
    // break if we change the default skiplist implementation
    ASSERT_GE(value, 192);
    uint64_t int_num;
    uint64_t base_total_size;
    ASSERT_TRUE(dbfull()->GetIntProperty(
        handles_[1], "rocksdb.estimate-num-keys", &base_total_size));
    // Mix deletes and a put; estimate-num-keys is entries minus deletes.
    ASSERT_OK(dbfull()->Delete(writeOpt, handles_[1], "k2"));
    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k3", ""));
    ASSERT_OK(dbfull()->Delete(writeOpt, handles_[1], "k3"));
    ASSERT_TRUE(dbfull()->GetIntProperty(
        handles_[1], "rocksdb.num-deletes-active-mem-table", &int_num));
    ASSERT_EQ(int_num, 2U);
    ASSERT_TRUE(dbfull()->GetIntProperty(
        handles_[1], "rocksdb.num-entries-active-mem-table", &int_num));
    ASSERT_EQ(int_num, 3U);
    // Two more big puts roll the active memtable (with its 3 entries and 2
    // deletes) plus one sealed put into the immutable set.
    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k2", big_value));
    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k2", big_value));
    ASSERT_TRUE(dbfull()->GetIntProperty(
        handles_[1], "rocksdb.num-entries-imm-mem-tables", &int_num));
    ASSERT_EQ(int_num, 4U);
    ASSERT_TRUE(dbfull()->GetIntProperty(
        handles_[1], "rocksdb.num-deletes-imm-mem-tables", &int_num));
    ASSERT_EQ(int_num, 2U);
    ASSERT_TRUE(dbfull()->GetIntProperty(
        handles_[1], "rocksdb.estimate-num-keys", &int_num));
    ASSERT_EQ(int_num, base_total_size + 1);
    SetPerfLevel(kDisable);
    ASSERT_TRUE(GetPerfLevel() == kDisable);
  } while (ChangeCompactOptions());
}
  693. // TODO(techdept) : Disabled flaky test #12863555
  694. TEST_F(DBPropertiesTest, DISABLED_GetProperty) {
  695. // Set sizes to both background thread pool to be 1 and block them.
  696. env_->SetBackgroundThreads(1, Env::HIGH);
  697. env_->SetBackgroundThreads(1, Env::LOW);
  698. test::SleepingBackgroundTask sleeping_task_low;
  699. env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
  700. Env::Priority::LOW);
  701. test::SleepingBackgroundTask sleeping_task_high;
  702. env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
  703. &sleeping_task_high, Env::Priority::HIGH);
  704. Options options = CurrentOptions();
  705. WriteOptions writeOpt = WriteOptions();
  706. writeOpt.disableWAL = true;
  707. options.compaction_style = kCompactionStyleUniversal;
  708. options.level0_file_num_compaction_trigger = 1;
  709. options.compaction_options_universal.size_ratio = 50;
  710. options.max_background_compactions = 1;
  711. options.max_background_flushes = 1;
  712. options.max_write_buffer_number = 10;
  713. options.min_write_buffer_number_to_merge = 1;
  714. options.max_write_buffer_size_to_maintain = 0;
  715. options.write_buffer_size = 1000000;
  716. Reopen(options);
  717. std::string big_value(1000000 * 2, 'x');
  718. std::string num;
  719. uint64_t int_num;
  720. SetPerfLevel(kEnableTime);
  721. ASSERT_TRUE(
  722. dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
  723. ASSERT_EQ(int_num, 0U);
  724. ASSERT_TRUE(
  725. dbfull()->GetIntProperty("rocksdb.estimate-live-data-size", &int_num));
  726. ASSERT_EQ(int_num, 0U);
  727. ASSERT_OK(dbfull()->Put(writeOpt, "k1", big_value));
  728. ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
  729. ASSERT_EQ(num, "0");
  730. ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num));
  731. ASSERT_EQ(num, "0");
  732. ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num));
  733. ASSERT_EQ(num, "0");
  734. ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
  735. ASSERT_EQ(num, "1");
  736. get_perf_context()->Reset();
  737. ASSERT_OK(dbfull()->Put(writeOpt, "k2", big_value));
  738. ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
  739. ASSERT_EQ(num, "1");
  740. ASSERT_OK(dbfull()->Delete(writeOpt, "k-non-existing"));
  741. ASSERT_OK(dbfull()->Put(writeOpt, "k3", big_value));
  742. ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
  743. ASSERT_EQ(num, "2");
  744. ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num));
  745. ASSERT_EQ(num, "1");
  746. ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num));
  747. ASSERT_EQ(num, "0");
  748. ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
  749. ASSERT_EQ(num, "2");
  750. // Verify the same set of properties through GetIntProperty
  751. ASSERT_TRUE(
  752. dbfull()->GetIntProperty("rocksdb.num-immutable-mem-table", &int_num));
  753. ASSERT_EQ(int_num, 2U);
  754. ASSERT_TRUE(
  755. dbfull()->GetIntProperty("rocksdb.mem-table-flush-pending", &int_num));
  756. ASSERT_EQ(int_num, 1U);
  757. ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.compaction-pending", &int_num));
  758. ASSERT_EQ(int_num, 0U);
  759. ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &int_num));
  760. ASSERT_EQ(int_num, 2U);
  761. ASSERT_TRUE(
  762. dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
  763. ASSERT_EQ(int_num, 0U);
  764. sleeping_task_high.WakeUp();
  765. sleeping_task_high.WaitUntilDone();
  766. dbfull()->TEST_WaitForFlushMemTable();
  767. ASSERT_OK(dbfull()->Put(writeOpt, "k4", big_value));
  768. ASSERT_OK(dbfull()->Put(writeOpt, "k5", big_value));
  769. dbfull()->TEST_WaitForFlushMemTable();
  770. ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num));
  771. ASSERT_EQ(num, "0");
  772. ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num));
  773. ASSERT_EQ(num, "1");
  774. ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
  775. ASSERT_EQ(num, "4");
  776. ASSERT_TRUE(
  777. dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
  778. ASSERT_GT(int_num, 0U);
  779. sleeping_task_low.WakeUp();
  780. sleeping_task_low.WaitUntilDone();
  781. // Wait for compaction to be done. This is important because otherwise RocksDB
  782. // might schedule a compaction when reopening the database, failing assertion
  783. // (A) as a result.
  784. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  785. options.max_open_files = 10;
  786. Reopen(options);
  787. // After reopening, no table reader is loaded, so no memory for table readers
  788. ASSERT_TRUE(
  789. dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
  790. ASSERT_EQ(int_num, 0U); // (A)
  791. ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &int_num));
  792. ASSERT_GT(int_num, 0U);
  793. // After reading a key, at least one table reader is loaded.
  794. Get("k5");
  795. ASSERT_TRUE(
  796. dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
  797. ASSERT_GT(int_num, 0U);
  798. // Test rocksdb.num-live-versions
  799. {
  800. options.level0_file_num_compaction_trigger = 20;
  801. Reopen(options);
  802. ASSERT_TRUE(
  803. dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
  804. ASSERT_EQ(int_num, 1U);
  805. // Use an iterator to hold current version
  806. std::unique_ptr<Iterator> iter1(dbfull()->NewIterator(ReadOptions()));
  807. ASSERT_OK(dbfull()->Put(writeOpt, "k6", big_value));
  808. ASSERT_OK(Flush());
  809. ASSERT_TRUE(
  810. dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
  811. ASSERT_EQ(int_num, 2U);
  812. // Use an iterator to hold current version
  813. std::unique_ptr<Iterator> iter2(dbfull()->NewIterator(ReadOptions()));
  814. ASSERT_OK(dbfull()->Put(writeOpt, "k7", big_value));
  815. ASSERT_OK(Flush());
  816. ASSERT_TRUE(
  817. dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
  818. ASSERT_EQ(int_num, 3U);
  819. iter2.reset();
  820. ASSERT_TRUE(
  821. dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
  822. ASSERT_EQ(int_num, 2U);
  823. iter1.reset();
  824. ASSERT_TRUE(
  825. dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
  826. ASSERT_EQ(int_num, 1U);
  827. }
  828. }
  829. TEST_F(DBPropertiesTest, ApproximateMemoryUsage) {
  830. const int kNumRounds = 10;
  831. // TODO(noetzli) kFlushesPerRound does not really correlate with how many
  832. // flushes happen.
  833. const int kFlushesPerRound = 10;
  834. const int kWritesPerFlush = 10;
  835. const int kKeySize = 100;
  836. const int kValueSize = 1000;
  837. Options options;
  838. options.write_buffer_size = 1000; // small write buffer
  839. options.min_write_buffer_number_to_merge = 4;
  840. options.compression = kNoCompression;
  841. options.create_if_missing = true;
  842. options = CurrentOptions(options);
  843. DestroyAndReopen(options);
  844. Random rnd(301);
  845. std::vector<Iterator*> iters;
  846. uint64_t active_mem;
  847. uint64_t unflushed_mem;
  848. uint64_t all_mem;
  849. uint64_t prev_all_mem;
  850. // Phase 0. The verify the initial value of all these properties are the same
  851. // as we have no mem-tables.
  852. dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
  853. dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
  854. dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
  855. ASSERT_EQ(all_mem, active_mem);
  856. ASSERT_EQ(all_mem, unflushed_mem);
  857. // Phase 1. Simply issue Put() and expect "cur-size-all-mem-tables" equals to
  858. // "size-all-mem-tables"
  859. for (int r = 0; r < kNumRounds; ++r) {
  860. for (int f = 0; f < kFlushesPerRound; ++f) {
  861. for (int w = 0; w < kWritesPerFlush; ++w) {
  862. ASSERT_OK(
  863. Put(rnd.RandomString(kKeySize), rnd.RandomString(kValueSize)));
  864. }
  865. }
  866. // Make sure that there is no flush between getting the two properties.
  867. ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
  868. dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
  869. dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
  870. // in no iterator case, these two number should be the same.
  871. ASSERT_EQ(unflushed_mem, all_mem);
  872. }
  873. prev_all_mem = all_mem;
  874. // Phase 2. Keep issuing Put() but also create new iterators. This time we
  875. // expect "size-all-mem-tables" > "cur-size-all-mem-tables".
  876. for (int r = 0; r < kNumRounds; ++r) {
  877. iters.push_back(db_->NewIterator(ReadOptions()));
  878. for (int f = 0; f < kFlushesPerRound; ++f) {
  879. for (int w = 0; w < kWritesPerFlush; ++w) {
  880. ASSERT_OK(
  881. Put(rnd.RandomString(kKeySize), rnd.RandomString(kValueSize)));
  882. }
  883. }
  884. // Force flush to prevent flush from happening between getting the
  885. // properties or after getting the properties and before the new round.
  886. ASSERT_OK(Flush());
  887. // In the second round, add iterators.
  888. dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
  889. dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
  890. dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
  891. ASSERT_GT(all_mem, active_mem);
  892. ASSERT_GT(all_mem, unflushed_mem);
  893. ASSERT_GT(all_mem, prev_all_mem);
  894. prev_all_mem = all_mem;
  895. }
  896. // Phase 3. Delete iterators and expect "size-all-mem-tables" shrinks
  897. // whenever we release an iterator.
  898. for (auto* iter : iters) {
  899. ASSERT_OK(iter->status());
  900. delete iter;
  901. dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
  902. // Expect the size shrinking
  903. ASSERT_LT(all_mem, prev_all_mem);
  904. prev_all_mem = all_mem;
  905. }
  906. // Expect all these three counters to be the same.
  907. dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
  908. dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
  909. dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
  910. ASSERT_EQ(active_mem, unflushed_mem);
  911. ASSERT_EQ(unflushed_mem, all_mem);
  912. // Phase 5. Reopen, and expect all these three counters to be the same again.
  913. Reopen(options);
  914. dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
  915. dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
  916. dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
  917. ASSERT_EQ(active_mem, unflushed_mem);
  918. ASSERT_EQ(unflushed_mem, all_mem);
  919. }
// Verifies "rocksdb.estimate-pending-compaction-bytes": zero with one L0
// file, positive once enough L0 files accumulate to need compaction (the
// LOW-pri background thread is blocked so compaction cannot run), and zero
// again after the compaction is released and completes.
TEST_F(DBPropertiesTest, EstimatePendingCompBytes) {
  // Set sizes to both background thread pool to be 1 and block them.
  env_->SetBackgroundThreads(1, Env::HIGH);
  env_->SetBackgroundThreads(1, Env::LOW);
  test::SleepingBackgroundTask sleeping_task_low;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
  Options options = CurrentOptions();
  WriteOptions writeOpt = WriteOptions();
  writeOpt.disableWAL = true;
  // Level compaction triggered at 2 L0 files.
  options.compaction_style = kCompactionStyleLevel;
  options.level0_file_num_compaction_trigger = 2;
  options.max_background_compactions = 1;
  options.max_background_flushes = 1;
  options.max_write_buffer_number = 10;
  options.min_write_buffer_number_to_merge = 1;
  options.max_write_buffer_size_to_maintain = 0;
  options.write_buffer_size = 1000000;
  Reopen(options);
  std::string big_value(1000000 * 2, 'x');
  std::string num;
  uint64_t int_num;
  // One L0 file: nothing pending yet.
  ASSERT_OK(dbfull()->Put(writeOpt, "k1", big_value));
  ASSERT_OK(Flush());
  ASSERT_TRUE(dbfull()->GetIntProperty(
      "rocksdb.estimate-pending-compaction-bytes", &int_num));
  ASSERT_EQ(int_num, 0U);
  // Second file reaches the trigger; compaction is blocked, so the estimate
  // becomes positive.
  ASSERT_OK(dbfull()->Put(writeOpt, "k2", big_value));
  ASSERT_OK(Flush());
  ASSERT_TRUE(dbfull()->GetIntProperty(
      "rocksdb.estimate-pending-compaction-bytes", &int_num));
  ASSERT_GT(int_num, 0U);
  ASSERT_OK(dbfull()->Put(writeOpt, "k3", big_value));
  ASSERT_OK(Flush());
  ASSERT_TRUE(dbfull()->GetIntProperty(
      "rocksdb.estimate-pending-compaction-bytes", &int_num));
  ASSERT_GT(int_num, 0U);
  // Unblock the background thread and let the compaction drain the debt.
  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilDone();
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_TRUE(dbfull()->GetIntProperty(
      "rocksdb.estimate-pending-compaction-bytes", &int_num));
  ASSERT_EQ(int_num, 0U);
}
// Verifies per-level compression ratio reporting with per-level compression
// settings: -1.0 for levels with no files, < 1.0 for uncompressed L0, and a
// large ratio for Snappy-compressed, highly redundant data at L1.
TEST_F(DBPropertiesTest, EstimateCompressionRatio) {
  if (!Snappy_Supported()) {
    return;
  }
  const int kNumL0Files = 3;
  const int kNumEntriesPerFile = 1000;
  Options options = CurrentOptions();
  options.disable_auto_compactions = true;
  options.num_levels = 3;
  Reopen(options);
  // L0 stays uncompressed; L1+ uses Snappy.
  ASSERT_OK(db_->SetOptions(
      {{"compression_per_level", "kNoCompression:kSnappyCompression"}}));
  auto opts = db_->GetOptions();
  ASSERT_EQ(opts.compression_per_level.size(), 2);
  ASSERT_EQ(opts.compression_per_level[0], kNoCompression);
  ASSERT_EQ(opts.compression_per_level[1], kSnappyCompression);
  // compression ratio is -1.0 when no open files at level
  ASSERT_EQ(CompressionRatioAtLevel(0), -1.0);
  const std::string kVal(100, 'a');
  for (int i = 0; i < kNumL0Files; ++i) {
    for (int j = 0; j < kNumEntriesPerFile; ++j) {
      // Put common data ("key") at end to prevent delta encoding from
      // compressing the key effectively
      std::string key = std::to_string(i) + std::to_string(j) + "key";
      ASSERT_OK(dbfull()->Put(WriteOptions(), key, kVal));
    }
    ASSERT_OK(Flush());
  }
  // no compression at L0, so ratio is less than one
  ASSERT_LT(CompressionRatioAtLevel(0), 1.0);
  ASSERT_GT(CompressionRatioAtLevel(0), 0.0);
  ASSERT_EQ(CompressionRatioAtLevel(1), -1.0);
  // Compact everything down to L1; L0 becomes empty again.
  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));
  ASSERT_EQ(CompressionRatioAtLevel(0), -1.0);
  // Data at L1 should be highly compressed thanks to Snappy and redundant data
  // in values (ratio is 12.846 as of 4/19/2016).
  ASSERT_GT(CompressionRatioAtLevel(1), 10.0);
}
  1002. class CountingUserTblPropCollector : public TablePropertiesCollector {
  1003. public:
  1004. const char* Name() const override { return "CountingUserTblPropCollector"; }
  1005. Status Finish(UserCollectedProperties* properties) override {
  1006. assert(!finish_called_);
  1007. std::string encoded;
  1008. PutVarint32(&encoded, count_);
  1009. *properties = UserCollectedProperties{
  1010. {"CountingUserTblPropCollector", message_},
  1011. {"Count", encoded},
  1012. };
  1013. finish_called_ = true;
  1014. return Status::OK();
  1015. }
  1016. Status AddUserKey(const Slice& /*user_key*/, const Slice& /*value*/,
  1017. EntryType /*type*/, SequenceNumber /*seq*/,
  1018. uint64_t /*file_size*/) override {
  1019. ++count_;
  1020. return Status::OK();
  1021. }
  1022. UserCollectedProperties GetReadableProperties() const override {
  1023. assert(finish_called_);
  1024. return UserCollectedProperties{};
  1025. }
  1026. private:
  1027. std::string message_ = "Rocksdb";
  1028. uint32_t count_ = 0;
  1029. bool finish_called_ = false;
  1030. };
  1031. class CountingUserTblPropCollectorFactory
  1032. : public TablePropertiesCollectorFactory {
  1033. public:
  1034. explicit CountingUserTblPropCollectorFactory(
  1035. uint32_t expected_column_family_id)
  1036. : expected_column_family_id_(expected_column_family_id),
  1037. num_created_(0) {}
  1038. TablePropertiesCollector* CreateTablePropertiesCollector(
  1039. TablePropertiesCollectorFactory::Context context) override {
  1040. EXPECT_EQ(expected_column_family_id_, context.column_family_id);
  1041. num_created_++;
  1042. return new CountingUserTblPropCollector();
  1043. }
  1044. const char* Name() const override {
  1045. return "CountingUserTblPropCollectorFactory";
  1046. }
  1047. void set_expected_column_family_id(uint32_t v) {
  1048. expected_column_family_id_ = v;
  1049. }
  1050. uint32_t expected_column_family_id_;
  1051. uint32_t num_created_;
  1052. };
  1053. class CountingDeleteTabPropCollector : public TablePropertiesCollector {
  1054. public:
  1055. const char* Name() const override { return "CountingDeleteTabPropCollector"; }
  1056. Status AddUserKey(const Slice& /*user_key*/, const Slice& /*value*/,
  1057. EntryType type, SequenceNumber /*seq*/,
  1058. uint64_t /*file_size*/) override {
  1059. if (type == kEntryDelete) {
  1060. num_deletes_++;
  1061. }
  1062. return Status::OK();
  1063. }
  1064. bool NeedCompact() const override { return num_deletes_ > 10; }
  1065. UserCollectedProperties GetReadableProperties() const override {
  1066. return UserCollectedProperties{};
  1067. }
  1068. Status Finish(UserCollectedProperties* properties) override {
  1069. *properties =
  1070. UserCollectedProperties{{"num_delete", std::to_string(num_deletes_)}};
  1071. return Status::OK();
  1072. }
  1073. private:
  1074. uint32_t num_deletes_ = 0;
  1075. };
  1076. class CountingDeleteTabPropCollectorFactory
  1077. : public TablePropertiesCollectorFactory {
  1078. public:
  1079. TablePropertiesCollector* CreateTablePropertiesCollector(
  1080. TablePropertiesCollectorFactory::Context /*context*/) override {
  1081. return new CountingDeleteTabPropCollector();
  1082. }
  1083. const char* Name() const override {
  1084. return "CountingDeleteTabPropCollectorFactory";
  1085. }
  1086. };
  1087. class BlockCountingTablePropertiesCollector : public TablePropertiesCollector {
  1088. public:
  1089. static const std::string kNumSampledBlocksPropertyName;
  1090. const char* Name() const override {
  1091. return "BlockCountingTablePropertiesCollector";
  1092. }
  1093. Status Finish(UserCollectedProperties* properties) override {
  1094. (*properties)[kNumSampledBlocksPropertyName] =
  1095. std::to_string(num_sampled_blocks_);
  1096. return Status::OK();
  1097. }
  1098. Status AddUserKey(const Slice& /*user_key*/, const Slice& /*value*/,
  1099. EntryType /*type*/, SequenceNumber /*seq*/,
  1100. uint64_t /*file_size*/) override {
  1101. return Status::OK();
  1102. }
  1103. void BlockAdd(uint64_t /* block_uncomp_bytes */,
  1104. uint64_t block_compressed_bytes_fast,
  1105. uint64_t block_compressed_bytes_slow) override {
  1106. if (block_compressed_bytes_fast > 0 || block_compressed_bytes_slow > 0) {
  1107. num_sampled_blocks_++;
  1108. }
  1109. }
  1110. UserCollectedProperties GetReadableProperties() const override {
  1111. return UserCollectedProperties{
  1112. {kNumSampledBlocksPropertyName, std::to_string(num_sampled_blocks_)},
  1113. };
  1114. }
  1115. private:
  1116. uint32_t num_sampled_blocks_ = 0;
  1117. };
// Property key under which BlockCountingTablePropertiesCollector publishes
// the number of compression-sampled blocks.
const std::string
    BlockCountingTablePropertiesCollector::kNumSampledBlocksPropertyName =
        "NumSampledBlocks";
  1121. class BlockCountingTablePropertiesCollectorFactory
  1122. : public TablePropertiesCollectorFactory {
  1123. public:
  1124. const char* Name() const override {
  1125. return "BlockCountingTablePropertiesCollectorFactory";
  1126. }
  1127. TablePropertiesCollector* CreateTablePropertiesCollector(
  1128. TablePropertiesCollectorFactory::Context /* context */) override {
  1129. return new BlockCountingTablePropertiesCollector();
  1130. }
  1131. };
// Verifies that user-defined table properties written by a custom collector
// are retrievable via GetPropertiesOfAllTables(), and that collectors are
// created both for flushes and for compactions.
TEST_F(DBPropertiesTest, GetUserDefinedTableProperties) {
  Options options = CurrentOptions();
  // Effectively disable automatic compactions so file count stays at 4.
  options.level0_file_num_compaction_trigger = (1 << 30);
  options.table_properties_collector_factories.resize(1);
  std::shared_ptr<CountingUserTblPropCollectorFactory> collector_factory =
      std::make_shared<CountingUserTblPropCollectorFactory>(0);
  options.table_properties_collector_factories[0] = collector_factory;
  Reopen(options);
  // Create 4 tables
  for (int table = 0; table < 4; ++table) {
    // Table t holds 10 + t keys, so per-table counts are 10, 11, 12, 13.
    for (int i = 0; i < 10 + table; ++i) {
      ASSERT_OK(
          db_->Put(WriteOptions(), std::to_string(table * 100 + i), "val"));
    }
    ASSERT_OK(db_->Flush(FlushOptions()));
  }
  TablePropertiesCollection props;
  ASSERT_OK(db_->GetPropertiesOfAllTables(&props));
  ASSERT_EQ(4U, props.size());
  uint32_t sum = 0;
  for (const auto& item : props) {
    auto& user_collected = item.second->user_collected_properties;
    ASSERT_TRUE(user_collected.find("CountingUserTblPropCollector") !=
                user_collected.end());
    ASSERT_EQ(user_collected.at("CountingUserTblPropCollector"), "Rocksdb");
    ASSERT_TRUE(user_collected.find("Count") != user_collected.end());
    // "Count" is varint-encoded; decode before summing.
    Slice key(user_collected.at("Count"));
    uint32_t count;
    ASSERT_TRUE(GetVarint32(&key, &count));
    sum += count;
  }
  ASSERT_EQ(10u + 11u + 12u + 13u, sum);
  ASSERT_GT(collector_factory->num_created_, 0U);
  // Compaction must also create fresh collectors for its output files.
  collector_factory->num_created_ = 0;
  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));
  ASSERT_GT(collector_factory->num_created_, 0U);
}
  1169. TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) {
  1170. Options options = CurrentOptions();
  1171. options.level0_file_num_compaction_trigger = 3;
  1172. options.table_properties_collector_factories.resize(1);
  1173. std::shared_ptr<CountingUserTblPropCollectorFactory> collector_factory =
  1174. std::make_shared<CountingUserTblPropCollectorFactory>(1);
  1175. options.table_properties_collector_factories[0] = collector_factory,
  1176. CreateAndReopenWithCF({"pikachu"}, options);
  1177. // Create 2 files
  1178. for (int table = 0; table < 2; ++table) {
  1179. for (int i = 0; i < 10 + table; ++i) {
  1180. ASSERT_OK(Put(1, std::to_string(table * 100 + i), "val"));
  1181. }
  1182. ASSERT_OK(Flush(1));
  1183. }
  1184. ASSERT_GT(collector_factory->num_created_, 0U);
  1185. collector_factory->num_created_ = 0;
  1186. // Trigger automatic compactions.
  1187. for (int table = 0; table < 3; ++table) {
  1188. for (int i = 0; i < 10 + table; ++i) {
  1189. ASSERT_OK(Put(1, std::to_string(table * 100 + i), "val"));
  1190. }
  1191. ASSERT_OK(Flush(1));
  1192. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  1193. }
  1194. ASSERT_GT(collector_factory->num_created_, 0U);
  1195. collector_factory->num_created_ = 0;
  1196. ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]));
  1197. ASSERT_GT(collector_factory->num_created_, 0U);
  1198. // Come back to write to default column family
  1199. collector_factory->num_created_ = 0;
  1200. collector_factory->set_expected_column_family_id(0); // default CF
  1201. // Create 4 tables in default column family
  1202. for (int table = 0; table < 2; ++table) {
  1203. for (int i = 0; i < 10 + table; ++i) {
  1204. ASSERT_OK(Put(std::to_string(table * 100 + i), "val"));
  1205. }
  1206. ASSERT_OK(Flush());
  1207. }
  1208. ASSERT_GT(collector_factory->num_created_, 0U);
  1209. collector_factory->num_created_ = 0;
  1210. // Trigger automatic compactions.
  1211. for (int table = 0; table < 3; ++table) {
  1212. for (int i = 0; i < 10 + table; ++i) {
  1213. ASSERT_OK(Put(std::to_string(table * 100 + i), "val"));
  1214. }
  1215. ASSERT_OK(Flush());
  1216. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  1217. }
  1218. ASSERT_GT(collector_factory->num_created_, 0U);
  1219. collector_factory->num_created_ = 0;
  1220. ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));
  1221. ASSERT_GT(collector_factory->num_created_, 0U);
  1222. }
  1223. TEST_F(DBPropertiesTest, TablePropertiesNeedCompactTest) {
  1224. Random rnd(301);
  1225. Options options;
  1226. options.create_if_missing = true;
  1227. options.write_buffer_size = 4096;
  1228. options.max_write_buffer_number = 8;
  1229. options.level0_file_num_compaction_trigger = 2;
  1230. options.level0_slowdown_writes_trigger = 2;
  1231. options.level0_stop_writes_trigger = 4;
  1232. options.target_file_size_base = 2048;
  1233. options.max_bytes_for_level_base = 10240;
  1234. options.max_bytes_for_level_multiplier = 4;
  1235. options.soft_pending_compaction_bytes_limit = 1024 * 1024;
  1236. options.num_levels = 8;
  1237. options.env = env_;
  1238. std::shared_ptr<TablePropertiesCollectorFactory> collector_factory =
  1239. std::make_shared<CountingDeleteTabPropCollectorFactory>();
  1240. options.table_properties_collector_factories.resize(1);
  1241. options.table_properties_collector_factories[0] = collector_factory;
  1242. DestroyAndReopen(options);
  1243. const int kMaxKey = 1000;
  1244. for (int i = 0; i < kMaxKey; i++) {
  1245. ASSERT_OK(Put(Key(i), rnd.RandomString(102)));
  1246. ASSERT_OK(Put(Key(kMaxKey + i), rnd.RandomString(102)));
  1247. }
  1248. ASSERT_OK(Flush());
  1249. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  1250. if (NumTableFilesAtLevel(0) == 1) {
  1251. // Clear Level 0 so that when later flush a file with deletions,
  1252. // we don't trigger an organic compaction.
  1253. ASSERT_OK(Put(Key(0), ""));
  1254. ASSERT_OK(Put(Key(kMaxKey * 2), ""));
  1255. ASSERT_OK(Flush());
  1256. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  1257. }
  1258. ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  1259. {
  1260. int c = 0;
  1261. std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
  1262. iter->Seek(Key(kMaxKey - 100));
  1263. while (iter->Valid() && iter->key().compare(Key(kMaxKey + 100)) < 0) {
  1264. iter->Next();
  1265. ++c;
  1266. }
  1267. ASSERT_OK(iter->status());
  1268. ASSERT_EQ(c, 200);
  1269. }
  1270. ASSERT_OK(Delete(Key(0)));
  1271. for (int i = kMaxKey - 100; i < kMaxKey + 100; i++) {
  1272. ASSERT_OK(Delete(Key(i)));
  1273. }
  1274. ASSERT_OK(Delete(Key(kMaxKey * 2)));
  1275. ASSERT_OK(Flush());
  1276. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  1277. {
  1278. SetPerfLevel(kEnableCount);
  1279. get_perf_context()->Reset();
  1280. int c = 0;
  1281. std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
  1282. iter->Seek(Key(kMaxKey - 100));
  1283. while (iter->Valid() && iter->key().compare(Key(kMaxKey + 100)) < 0) {
  1284. iter->Next();
  1285. }
  1286. ASSERT_OK(iter->status());
  1287. ASSERT_EQ(c, 0);
  1288. ASSERT_LT(get_perf_context()->internal_delete_skipped_count, 30u);
  1289. ASSERT_LT(get_perf_context()->internal_key_skipped_count, 30u);
  1290. SetPerfLevel(kDisable);
  1291. }
  1292. }
  1293. TEST_F(DBPropertiesTest, NeedCompactHintPersistentTest) {
  1294. Random rnd(301);
  1295. Options options;
  1296. options.create_if_missing = true;
  1297. options.max_write_buffer_number = 8;
  1298. options.level0_file_num_compaction_trigger = 10;
  1299. options.level0_slowdown_writes_trigger = 10;
  1300. options.level0_stop_writes_trigger = 10;
  1301. options.disable_auto_compactions = true;
  1302. options.env = env_;
  1303. std::shared_ptr<TablePropertiesCollectorFactory> collector_factory =
  1304. std::make_shared<CountingDeleteTabPropCollectorFactory>();
  1305. options.table_properties_collector_factories.resize(1);
  1306. options.table_properties_collector_factories[0] = collector_factory;
  1307. DestroyAndReopen(options);
  1308. const int kMaxKey = 100;
  1309. for (int i = 0; i < kMaxKey; i++) {
  1310. ASSERT_OK(Put(Key(i), ""));
  1311. }
  1312. ASSERT_OK(Flush());
  1313. ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
  1314. for (int i = 1; i < kMaxKey - 1; i++) {
  1315. ASSERT_OK(Delete(Key(i)));
  1316. }
  1317. ASSERT_OK(Flush());
  1318. ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
  1319. ASSERT_EQ(NumTableFilesAtLevel(0), 2);
  1320. // Restart the DB. Although number of files didn't reach
  1321. // options.level0_file_num_compaction_trigger, compaction should
  1322. // still be triggered because of the need-compaction hint.
  1323. options.disable_auto_compactions = false;
  1324. Reopen(options);
  1325. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  1326. ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  1327. {
  1328. SetPerfLevel(kEnableCount);
  1329. get_perf_context()->Reset();
  1330. int c = 0;
  1331. std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
  1332. for (iter->Seek(Key(0)); iter->Valid(); iter->Next()) {
  1333. c++;
  1334. }
  1335. ASSERT_OK(iter->status());
  1336. ASSERT_EQ(c, 2);
  1337. ASSERT_EQ(get_perf_context()->internal_delete_skipped_count, 0);
  1338. // We iterate every key twice. Is it a bug?
  1339. ASSERT_LE(get_perf_context()->internal_key_skipped_count, 2);
  1340. SetPerfLevel(kDisable);
  1341. }
  1342. }
  1343. // Excluded from RocksDB lite tests due to `GetPropertiesOfAllTables()` usage.
  1344. TEST_F(DBPropertiesTest, BlockAddForCompressionSampling) {
  1345. // Sampled compression requires at least one of the following four types.
  1346. if (!Snappy_Supported() && !Zlib_Supported() && !LZ4_Supported() &&
  1347. !ZSTD_Supported()) {
  1348. return;
  1349. }
  1350. Options options = CurrentOptions();
  1351. options.disable_auto_compactions = true;
  1352. options.table_properties_collector_factories.emplace_back(
  1353. std::make_shared<BlockCountingTablePropertiesCollectorFactory>());
  1354. for (bool sample_for_compression : {false, true}) {
  1355. // For simplicity/determinism, sample 100% when enabled, or 0% when disabled
  1356. options.sample_for_compression = sample_for_compression ? 1 : 0;
  1357. DestroyAndReopen(options);
  1358. // Setup the following LSM:
  1359. //
  1360. // L0_0 ["a", "b"]
  1361. // L1_0 ["a", "b"]
  1362. //
  1363. // L0_0 was created by flush. L1_0 was created by compaction. Each file
  1364. // contains one data block.
  1365. for (int i = 0; i < 3; ++i) {
  1366. ASSERT_OK(Put("a", "val"));
  1367. ASSERT_OK(Put("b", "val"));
  1368. ASSERT_OK(Flush());
  1369. if (i == 1) {
  1370. ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  1371. }
  1372. }
  1373. // A `BlockAdd()` should have been seen for files generated by flush or
  1374. // compaction when `sample_for_compression` is enabled.
  1375. TablePropertiesCollection file_to_props;
  1376. ASSERT_OK(db_->GetPropertiesOfAllTables(&file_to_props));
  1377. ASSERT_EQ(2, file_to_props.size());
  1378. for (const auto& file_and_props : file_to_props) {
  1379. auto& user_props = file_and_props.second->user_collected_properties;
  1380. ASSERT_TRUE(user_props.find(BlockCountingTablePropertiesCollector::
  1381. kNumSampledBlocksPropertyName) !=
  1382. user_props.end());
  1383. ASSERT_EQ(user_props.at(BlockCountingTablePropertiesCollector::
  1384. kNumSampledBlocksPropertyName),
  1385. std::to_string(sample_for_compression ? 1 : 0));
  1386. }
  1387. }
  1388. }
// Fixture parameterized on compression speed class: `fast_` selects whether
// the test exercises the fast (LZ4/Snappy) or slow (ZSTD/Zlib) estimated
// data size.
class CompressionSamplingDBPropertiesTest
    : public DBPropertiesTest,
      public ::testing::WithParamInterface<bool> {
 public:
  CompressionSamplingDBPropertiesTest() : fast_(GetParam()) {}

 protected:
  // Which estimated-data-size field the derived tests compare against.
  const bool fast_;
};

// Run each TEST_P once with fast_ == false and once with fast_ == true.
INSTANTIATE_TEST_CASE_P(CompressionSamplingDBPropertiesTest,
                        CompressionSamplingDBPropertiesTest, ::testing::Bool());
  1399. // Excluded from RocksDB lite tests due to `GetPropertiesOfAllTables()` usage.
  1400. TEST_P(CompressionSamplingDBPropertiesTest,
  1401. EstimateDataSizeWithCompressionSampling) {
  1402. Options options = CurrentOptions();
  1403. if (fast_) {
  1404. // One of the following light compression libraries must be present.
  1405. if (LZ4_Supported()) {
  1406. options.compression = kLZ4Compression;
  1407. } else if (Snappy_Supported()) {
  1408. options.compression = kSnappyCompression;
  1409. } else {
  1410. return;
  1411. }
  1412. } else {
  1413. // One of the following heavy compression libraries must be present.
  1414. if (ZSTD_Supported()) {
  1415. options.compression = kZSTD;
  1416. } else if (Zlib_Supported()) {
  1417. options.compression = kZlibCompression;
  1418. } else {
  1419. return;
  1420. }
  1421. }
  1422. options.disable_auto_compactions = true;
  1423. // For simplicity/determinism, sample 100%.
  1424. options.sample_for_compression = 1;
  1425. Reopen(options);
  1426. // Setup the following LSM:
  1427. //
  1428. // L0_0 ["a", "b"]
  1429. // L1_0 ["a", "b"]
  1430. //
  1431. // L0_0 was created by flush. L1_0 was created by compaction. Each file
  1432. // contains one data block. The value consists of compressible data so the
  1433. // data block should be stored compressed.
  1434. std::string val(1024, 'a');
  1435. for (int i = 0; i < 3; ++i) {
  1436. ASSERT_OK(Put("a", val));
  1437. ASSERT_OK(Put("b", val));
  1438. ASSERT_OK(Flush());
  1439. if (i == 1) {
  1440. ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  1441. }
  1442. }
  1443. TablePropertiesCollection file_to_props;
  1444. ASSERT_OK(db_->GetPropertiesOfAllTables(&file_to_props));
  1445. ASSERT_EQ(2, file_to_props.size());
  1446. for (const auto& file_and_props : file_to_props) {
  1447. ASSERT_GT(file_and_props.second->data_size, 0);
  1448. if (fast_) {
  1449. ASSERT_EQ(file_and_props.second->data_size,
  1450. file_and_props.second->fast_compression_estimated_data_size);
  1451. } else {
  1452. ASSERT_EQ(file_and_props.second->data_size,
  1453. file_and_props.second->slow_compression_estimated_data_size);
  1454. }
  1455. }
  1456. }
  1457. TEST_F(DBPropertiesTest, EstimateNumKeysUnderflow) {
  1458. Options options = CurrentOptions();
  1459. Reopen(options);
  1460. ASSERT_OK(Put("foo", "bar"));
  1461. ASSERT_OK(Delete("foo"));
  1462. ASSERT_OK(Delete("foo"));
  1463. uint64_t num_keys = 0;
  1464. ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &num_keys));
  1465. ASSERT_EQ(0, num_keys);
  1466. }
TEST_F(DBPropertiesTest, EstimateOldestKeyTime) {
  uint64_t oldest_key_time = 0;
  Options options = CurrentOptions();
  SetTimeElapseOnlySleepOnReopen(&options);

  // "rocksdb.estimate-oldest-key-time" only available to fifo compaction.
  for (auto compaction : {kCompactionStyleLevel, kCompactionStyleUniversal,
                          kCompactionStyleNone}) {
    options.compaction_style = compaction;
    options.create_if_missing = true;
    DestroyAndReopen(options);
    ASSERT_OK(Put("foo", "bar"));
    // For non-FIFO styles the property lookup must fail.
    ASSERT_FALSE(dbfull()->GetIntProperty(
        DB::Properties::kEstimateOldestKeyTime, &oldest_key_time));
  }

  int64_t mock_start_time;
  ASSERT_OK(env_->GetCurrentTime(&mock_start_time));

  // FIFO compaction with a 300s TTL; the `// -> N` comments below track
  // mock time as an offset from mock_start_time.
  options.compaction_style = kCompactionStyleFIFO;
  options.ttl = 300;
  options.max_open_files = -1;
  options.compaction_options_fifo.allow_compaction = false;
  DestroyAndReopen(options);

  env_->MockSleepForSeconds(100);  // -> 100
  ASSERT_OK(Put("k1", "v1"));
  // Oldest data is k1, written at +100 — first read from the memtable...
  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
                                       &oldest_key_time));
  ASSERT_EQ(100, oldest_key_time - mock_start_time);
  ASSERT_OK(Flush());
  ASSERT_EQ("1", FilesPerLevel());
  // ...then unchanged once k1's file has been flushed.
  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
                                       &oldest_key_time));
  ASSERT_EQ(100, oldest_key_time - mock_start_time);

  env_->MockSleepForSeconds(100);  // -> 200
  ASSERT_OK(Put("k2", "v2"));
  ASSERT_OK(Flush());
  ASSERT_EQ("2", FilesPerLevel());
  // Newer files don't change the estimate; the +100 file is still oldest.
  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
                                       &oldest_key_time));
  ASSERT_EQ(100, oldest_key_time - mock_start_time);

  env_->MockSleepForSeconds(100);  // -> 300
  ASSERT_OK(Put("k3", "v3"));
  ASSERT_OK(Flush());
  ASSERT_EQ("3", FilesPerLevel());
  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
                                       &oldest_key_time));
  ASSERT_EQ(100, oldest_key_time - mock_start_time);

  env_->MockSleepForSeconds(150);  // -> 450
  // FIFO TTL compaction drops the +100 file (age 350 > ttl 300).
  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  ASSERT_EQ("2", FilesPerLevel());
  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
                                       &oldest_key_time));
  ASSERT_EQ(200, oldest_key_time - mock_start_time);

  env_->MockSleepForSeconds(100);  // -> 550
  // Drops the +200 file.
  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  ASSERT_EQ("1", FilesPerLevel());
  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
                                       &oldest_key_time));
  ASSERT_EQ(300, oldest_key_time - mock_start_time);

  env_->MockSleepForSeconds(100);  // -> 650
  // Drops the last file; with no data left the property becomes unavailable.
  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  ASSERT_EQ("", FilesPerLevel());
  ASSERT_FALSE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
                                        &oldest_key_time));
}
TEST_F(DBPropertiesTest, SstFilesSize) {
  // Checks kTotalSstFilesSize / kLiveSstFilesSize / kObsoleteSstFilesSize,
  // including reading them from inside a compaction-completed callback while
  // file deletions are held off.
  struct TestListener : public EventListener {
    void OnCompactionCompleted(DB* db,
                               const CompactionJobInfo& /*info*/) override {
      assert(callback_triggered == false);
      assert(size_before_compaction > 0);
      callback_triggered = true;
      uint64_t total_sst_size = 0;
      uint64_t live_sst_size = 0;
      bool ok = db->GetIntProperty(DB::Properties::kTotalSstFilesSize,
                                   &total_sst_size);
      ASSERT_TRUE(ok);
      // total_sst_size include files before and after compaction.
      ASSERT_GT(total_sst_size, size_before_compaction);
      ok =
          db->GetIntProperty(DB::Properties::kLiveSstFilesSize, &live_sst_size);
      ASSERT_TRUE(ok);
      // live_sst_size only include files after compaction.
      ASSERT_GT(live_sst_size, 0);
      ASSERT_LT(live_sst_size, size_before_compaction);
    }

    // Set by the test body (to the pre-compaction total) before it triggers
    // the compaction.
    uint64_t size_before_compaction = 0;
    bool callback_triggered = false;
  };
  std::shared_ptr<TestListener> listener = std::make_shared<TestListener>();

  Options options;
  options.env = CurrentOptions().env;
  options.disable_auto_compactions = true;
  options.listeners.push_back(listener);
  Reopen(options);

  // Two flushed L0 files: one with 10 values, one with 5 tombstones.
  for (int i = 0; i < 10; i++) {
    ASSERT_OK(Put("key" + std::to_string(i), std::string(1000, 'v')));
  }
  ASSERT_OK(Flush());
  for (int i = 0; i < 5; i++) {
    ASSERT_OK(Delete("key" + std::to_string(i)));
  }
  ASSERT_OK(Flush());
  uint64_t sst_size;
  ASSERT_TRUE(
      db_->GetIntProperty(DB::Properties::kTotalSstFilesSize, &sst_size));
  ASSERT_GT(sst_size, 0);
  listener->size_before_compaction = sst_size;
  // Nothing is obsolete before the compaction.
  uint64_t obsolete_sst_size;
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kObsoleteSstFilesSize,
                                  &obsolete_sst_size));
  ASSERT_EQ(obsolete_sst_size, 0);

  // Hold files from being deleted so we can test property for size of obsolete
  // SST files.
  ASSERT_OK(db_->DisableFileDeletions());

  // Compact to clean all keys and trigger listener.
  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  ASSERT_TRUE(listener->callback_triggered);

  // Both input files are now obsolete but not yet deleted.
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kObsoleteSstFilesSize,
                                  &obsolete_sst_size));
  ASSERT_EQ(obsolete_sst_size, sst_size);

  // Let the obsolete files be deleted.
  ASSERT_OK(db_->EnableFileDeletions());
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kObsoleteSstFilesSize,
                                  &obsolete_sst_size));
  ASSERT_EQ(obsolete_sst_size, 0);
}
  1592. TEST_F(DBPropertiesTest, MinObsoleteSstNumberToKeep) {
  1593. class TestListener : public EventListener {
  1594. public:
  1595. void OnTableFileCreated(const TableFileCreationInfo& info) override {
  1596. if (info.reason == TableFileCreationReason::kCompaction) {
  1597. // Verify the property indicates that SSTs created by a running
  1598. // compaction cannot be deleted.
  1599. uint64_t created_file_num;
  1600. FileType created_file_type;
  1601. std::string filename =
  1602. info.file_path.substr(info.file_path.rfind('/') + 1);
  1603. ASSERT_TRUE(
  1604. ParseFileName(filename, &created_file_num, &created_file_type));
  1605. ASSERT_EQ(kTableFile, created_file_type);
  1606. uint64_t keep_sst_lower_bound;
  1607. ASSERT_TRUE(
  1608. db_->GetIntProperty(DB::Properties::kMinObsoleteSstNumberToKeep,
  1609. &keep_sst_lower_bound));
  1610. ASSERT_LE(keep_sst_lower_bound, created_file_num);
  1611. validated_ = true;
  1612. }
  1613. }
  1614. void SetDB(DB* db) { db_ = db; }
  1615. int GetNumCompactions() { return num_compactions_; }
  1616. // True if we've verified the property for at least one output file
  1617. bool Validated() { return validated_; }
  1618. private:
  1619. int num_compactions_ = 0;
  1620. bool validated_ = false;
  1621. DB* db_ = nullptr;
  1622. };
  1623. const int kNumL0Files = 4;
  1624. std::shared_ptr<TestListener> listener = std::make_shared<TestListener>();
  1625. Options options = CurrentOptions();
  1626. options.listeners.push_back(listener);
  1627. options.level0_file_num_compaction_trigger = kNumL0Files;
  1628. DestroyAndReopen(options);
  1629. listener->SetDB(db_);
  1630. for (int i = 0; i < kNumL0Files; ++i) {
  1631. // Make sure they overlap in keyspace to prevent trivial move
  1632. ASSERT_OK(Put("key1", "val"));
  1633. ASSERT_OK(Put("key2", "val"));
  1634. ASSERT_OK(Flush());
  1635. }
  1636. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  1637. ASSERT_TRUE(listener->Validated());
  1638. }
TEST_F(DBPropertiesTest, BlobCacheProperties) {
  // Exercises kBlobCacheCapacity / kBlobCacheUsage / kBlobCachePinnedUsage
  // by inserting dummy entries directly into the blob cache and checking the
  // reported sizes, including LRU eviction of unpinned entries.
  Options options;
  uint64_t value;

  options.env = CurrentOptions().env;

  // Test with empty blob cache.
  constexpr size_t kCapacity = 100;
  LRUCacheOptions co;
  co.capacity = kCapacity;
  co.num_shard_bits = 0;
  co.metadata_charge_policy = kDontChargeCacheMetadata;
  auto blob_cache = NewLRUCache(co);
  options.blob_cache = blob_cache;

  Reopen(options);

  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheCapacity, &value));
  ASSERT_EQ(kCapacity, value);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheUsage, &value));
  ASSERT_EQ(0, value);
  ASSERT_TRUE(
      db_->GetIntProperty(DB::Properties::kBlobCachePinnedUsage, &value));
  ASSERT_EQ(0, value);

  // Insert unpinned blob to the cache and check size.
  constexpr size_t kSize1 = 70;
  ASSERT_OK(blob_cache->Insert("blob1", nullptr /*value*/,
                               &kNoopCacheItemHelper, kSize1));
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheCapacity, &value));
  ASSERT_EQ(kCapacity, value);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheUsage, &value));
  ASSERT_EQ(kSize1, value);
  ASSERT_TRUE(
      db_->GetIntProperty(DB::Properties::kBlobCachePinnedUsage, &value));
  ASSERT_EQ(0, value);

  // Insert pinned blob to the cache and check size.
  // kSize1 + kSize2 > kCapacity, so the unpinned blob1 gets evicted.
  constexpr size_t kSize2 = 60;
  Cache::Handle* blob2 = nullptr;
  ASSERT_OK(blob_cache->Insert("blob2", nullptr /*value*/,
                               &kNoopCacheItemHelper, kSize2, &blob2));
  ASSERT_NE(nullptr, blob2);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheCapacity, &value));
  ASSERT_EQ(kCapacity, value);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheUsage, &value));
  // blob1 is evicted.
  ASSERT_EQ(kSize2, value);
  ASSERT_TRUE(
      db_->GetIntProperty(DB::Properties::kBlobCachePinnedUsage, &value));
  ASSERT_EQ(kSize2, value);

  // Insert another pinned blob to make the cache over-sized.
  // Pinned entries cannot be evicted, so usage exceeds capacity.
  constexpr size_t kSize3 = 80;
  Cache::Handle* blob3 = nullptr;
  ASSERT_OK(blob_cache->Insert("blob3", nullptr /*value*/,
                               &kNoopCacheItemHelper, kSize3, &blob3));
  ASSERT_NE(nullptr, blob3);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheCapacity, &value));
  ASSERT_EQ(kCapacity, value);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheUsage, &value));
  ASSERT_EQ(kSize2 + kSize3, value);
  ASSERT_TRUE(
      db_->GetIntProperty(DB::Properties::kBlobCachePinnedUsage, &value));
  ASSERT_EQ(kSize2 + kSize3, value);

  // Check size after release.
  blob_cache->Release(blob2);
  blob_cache->Release(blob3);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheCapacity, &value));
  ASSERT_EQ(kCapacity, value);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheUsage, &value));
  // blob2 will be evicted, while blob3 remain in cache after release.
  ASSERT_EQ(kSize3, value);
  ASSERT_TRUE(
      db_->GetIntProperty(DB::Properties::kBlobCachePinnedUsage, &value));
  ASSERT_EQ(0, value);
}
  1709. TEST_F(DBPropertiesTest, BlockCacheProperties) {
  1710. Options options;
  1711. uint64_t value;
  1712. options.env = CurrentOptions().env;
  1713. // Block cache properties are not available for tables other than
  1714. // block-based table.
  1715. options.table_factory.reset(NewPlainTableFactory());
  1716. Reopen(options);
  1717. ASSERT_FALSE(
  1718. db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
  1719. ASSERT_FALSE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
  1720. ASSERT_FALSE(
  1721. db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
  1722. options.table_factory.reset(NewCuckooTableFactory());
  1723. Reopen(options);
  1724. ASSERT_FALSE(
  1725. db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
  1726. ASSERT_FALSE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
  1727. ASSERT_FALSE(
  1728. db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
  1729. // Block cache properties are not available if block cache is not used.
  1730. BlockBasedTableOptions table_options;
  1731. table_options.no_block_cache = true;
  1732. options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  1733. Reopen(options);
  1734. ASSERT_FALSE(
  1735. db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
  1736. ASSERT_FALSE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
  1737. ASSERT_FALSE(
  1738. db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
  1739. // Test with empty block cache.
  1740. constexpr size_t kCapacity = 100;
  1741. LRUCacheOptions co;
  1742. co.capacity = kCapacity;
  1743. co.num_shard_bits = 0;
  1744. co.metadata_charge_policy = kDontChargeCacheMetadata;
  1745. auto block_cache = NewLRUCache(co);
  1746. table_options.block_cache = block_cache;
  1747. table_options.no_block_cache = false;
  1748. options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  1749. Reopen(options);
  1750. ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
  1751. ASSERT_EQ(kCapacity, value);
  1752. ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
  1753. ASSERT_EQ(0, value);
  1754. ASSERT_TRUE(
  1755. db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
  1756. ASSERT_EQ(0, value);
  1757. // Insert unpinned item to the cache and check size.
  1758. constexpr size_t kSize1 = 50;
  1759. ASSERT_OK(block_cache->Insert("item1", nullptr /*value*/,
  1760. &kNoopCacheItemHelper, kSize1));
  1761. ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
  1762. ASSERT_EQ(kCapacity, value);
  1763. ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
  1764. ASSERT_EQ(kSize1, value);
  1765. ASSERT_TRUE(
  1766. db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
  1767. ASSERT_EQ(0, value);
  1768. // Insert pinned item to the cache and check size.
  1769. constexpr size_t kSize2 = 30;
  1770. Cache::Handle* item2 = nullptr;
  1771. ASSERT_OK(block_cache->Insert("item2", nullptr /*value*/,
  1772. &kNoopCacheItemHelper, kSize2, &item2));
  1773. ASSERT_NE(nullptr, item2);
  1774. ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
  1775. ASSERT_EQ(kCapacity, value);
  1776. ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
  1777. ASSERT_EQ(kSize1 + kSize2, value);
  1778. ASSERT_TRUE(
  1779. db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
  1780. ASSERT_EQ(kSize2, value);
  1781. // Insert another pinned item to make the cache over-sized.
  1782. constexpr size_t kSize3 = 80;
  1783. Cache::Handle* item3 = nullptr;
  1784. ASSERT_OK(block_cache->Insert("item3", nullptr /*value*/,
  1785. &kNoopCacheItemHelper, kSize3, &item3));
  1786. ASSERT_NE(nullptr, item2);
  1787. ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
  1788. ASSERT_EQ(kCapacity, value);
  1789. ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
  1790. // Item 1 is evicted.
  1791. ASSERT_EQ(kSize2 + kSize3, value);
  1792. ASSERT_TRUE(
  1793. db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
  1794. ASSERT_EQ(kSize2 + kSize3, value);
  1795. // Check size after release.
  1796. block_cache->Release(item2);
  1797. block_cache->Release(item3);
  1798. ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
  1799. ASSERT_EQ(kCapacity, value);
  1800. ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
  1801. // item2 will be evicted, while item3 remain in cache after release.
  1802. ASSERT_EQ(kSize3, value);
  1803. ASSERT_TRUE(
  1804. db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
  1805. ASSERT_EQ(0, value);
  1806. }
TEST_F(DBPropertiesTest, GetMapPropertyDbStats) {
  // Validates the kDBStats map property (uptime, user bytes written, WAL
  // bytes written, write counts) against writes made under a mock clock.
  auto mock_clock = std::make_shared<MockSystemClock>(env_->GetSystemClock());
  CompositeEnvWrapper env(env_, mock_clock);

  Options opts = CurrentOptions();
  opts.env = &env;
  Reopen(opts);

  {
    // Freshly opened DB: all counters zero, no uptime elapsed.
    std::map<std::string, std::string> db_stats;
    ASSERT_TRUE(db_->GetMapProperty(DB::Properties::kDBStats, &db_stats));
    AssertDbStats(db_stats, 0.0 /* expected_uptime */,
                  0 /* expected_user_bytes_written */,
                  0 /* expected_wal_bytes_written */,
                  0 /* expected_user_writes_by_self */,
                  0 /* expected_user_writes_with_wal */);
  }

  {
    // Advancing the mock clock by 1.5s is reflected in uptime only.
    mock_clock->SleepForMicroseconds(1500000);

    std::map<std::string, std::string> db_stats;
    ASSERT_TRUE(db_->GetMapProperty(DB::Properties::kDBStats, &db_stats));
    AssertDbStats(db_stats, 1.5 /* expected_uptime */,
                  0 /* expected_user_bytes_written */,
                  0 /* expected_wal_bytes_written */,
                  0 /* expected_user_writes_by_self */,
                  0 /* expected_user_writes_with_wal */);
  }

  int expected_user_bytes_written = 0;
  {
    // Write with WAL disabled.
    WriteOptions write_opts;
    write_opts.disableWAL = true;
    WriteBatch batch;
    ASSERT_OK(batch.Put("key", "val"));
    expected_user_bytes_written += static_cast<int>(batch.GetDataSize());

    ASSERT_OK(db_->Write(write_opts, &batch));

    // Counts as a self write but contributes no WAL bytes.
    std::map<std::string, std::string> db_stats;
    ASSERT_TRUE(db_->GetMapProperty(DB::Properties::kDBStats, &db_stats));
    AssertDbStats(db_stats, 1.5 /* expected_uptime */,
                  expected_user_bytes_written,
                  0 /* expected_wal_bytes_written */,
                  1 /* expected_user_writes_by_self */,
                  0 /* expected_user_writes_with_wal */);
  }

  int expected_wal_bytes_written = 0;
  {
    // Write with WAL enabled.
    WriteBatch batch;
    ASSERT_OK(batch.Delete("key"));
    expected_user_bytes_written += static_cast<int>(batch.GetDataSize());
    expected_wal_bytes_written += static_cast<int>(batch.GetDataSize());

    ASSERT_OK(db_->Write(WriteOptions(), &batch));

    // Now both user and WAL byte counters advance, and the write counts as
    // both a self write and a with-WAL write.
    std::map<std::string, std::string> db_stats;
    ASSERT_TRUE(db_->GetMapProperty(DB::Properties::kDBStats, &db_stats));
    AssertDbStats(db_stats, 1.5 /* expected_uptime */,
                  expected_user_bytes_written, expected_wal_bytes_written,
                  2 /* expected_user_writes_by_self */,
                  1 /* expected_user_writes_with_wal */);
  }

  // Close the DB before the stack-local `env` it references goes away.
  Close();
}
  1866. TEST_F(DBPropertiesTest, GetMapPropertyBlockCacheEntryStats) {
  1867. // Currently only verifies the expected properties are present
  1868. std::map<std::string, std::string> values;
  1869. ASSERT_TRUE(
  1870. db_->GetMapProperty(DB::Properties::kBlockCacheEntryStats, &values));
  1871. ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::CacheId()) !=
  1872. values.end());
  1873. ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::CacheCapacityBytes()) !=
  1874. values.end());
  1875. ASSERT_TRUE(
  1876. values.find(
  1877. BlockCacheEntryStatsMapKeys::LastCollectionDurationSeconds()) !=
  1878. values.end());
  1879. ASSERT_TRUE(
  1880. values.find(BlockCacheEntryStatsMapKeys::LastCollectionAgeSeconds()) !=
  1881. values.end());
  1882. for (size_t i = 0; i < kNumCacheEntryRoles; ++i) {
  1883. CacheEntryRole role = static_cast<CacheEntryRole>(i);
  1884. ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::EntryCount(role)) !=
  1885. values.end());
  1886. ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::UsedBytes(role)) !=
  1887. values.end());
  1888. ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::UsedPercent(role)) !=
  1889. values.end());
  1890. }
  1891. // There should be no extra values in the map.
  1892. ASSERT_EQ(3 * kNumCacheEntryRoles + 4, values.size());
  1893. }
  1894. TEST_F(DBPropertiesTest, WriteStallStatsSanityCheck) {
  1895. for (uint32_t i = 0; i < static_cast<uint32_t>(WriteStallCause::kNone); ++i) {
  1896. WriteStallCause cause = static_cast<WriteStallCause>(i);
  1897. const std::string& str = WriteStallCauseToHyphenString(cause);
  1898. ASSERT_TRUE(!str.empty())
  1899. << "Please ensure mapping from `WriteStallCause` to "
  1900. "`WriteStallCauseToHyphenString` is complete";
  1901. if (cause == WriteStallCause::kCFScopeWriteStallCauseEnumMax ||
  1902. cause == WriteStallCause::kDBScopeWriteStallCauseEnumMax) {
  1903. ASSERT_EQ(str, InvalidWriteStallHyphenString())
  1904. << "Please ensure order in `WriteStallCauseToHyphenString` is "
  1905. "consistent with `WriteStallCause`";
  1906. }
  1907. }
  1908. for (uint32_t i = 0; i < static_cast<uint32_t>(WriteStallCondition::kNormal);
  1909. ++i) {
  1910. WriteStallCondition condition = static_cast<WriteStallCondition>(i);
  1911. const std::string& str = WriteStallConditionToHyphenString(condition);
  1912. ASSERT_TRUE(!str.empty())
  1913. << "Please ensure mapping from `WriteStallCondition` to "
  1914. "`WriteStallConditionToHyphenString` is complete";
  1915. }
  1916. for (uint32_t i = 0; i < static_cast<uint32_t>(WriteStallCause::kNone); ++i) {
  1917. for (uint32_t j = 0;
  1918. j < static_cast<uint32_t>(WriteStallCondition::kNormal); ++j) {
  1919. WriteStallCause cause = static_cast<WriteStallCause>(i);
  1920. WriteStallCondition condition = static_cast<WriteStallCondition>(j);
  1921. if (isCFScopeWriteStallCause(cause)) {
  1922. ASSERT_TRUE(InternalCFStat(cause, condition) !=
  1923. InternalStats::INTERNAL_CF_STATS_ENUM_MAX)
  1924. << "Please ensure the combination of WriteStallCause(" +
  1925. std::to_string(static_cast<uint32_t>(cause)) +
  1926. ") + WriteStallCondition(" +
  1927. std::to_string(static_cast<uint32_t>(condition)) +
  1928. ") is correctly mapped to a valid `InternalStats` or bypass "
  1929. "its check in this test";
  1930. } else if (isDBScopeWriteStallCause(cause)) {
  1931. InternalStats::InternalDBStatsType internal_db_stat =
  1932. InternalDBStat(cause, condition);
  1933. if (internal_db_stat == InternalStats::kIntStatsNumMax) {
  1934. ASSERT_TRUE(cause == WriteStallCause::kWriteBufferManagerLimit &&
  1935. condition == WriteStallCondition::kDelayed)
  1936. << "Please ensure the combination of WriteStallCause(" +
  1937. std::to_string(static_cast<uint32_t>(cause)) +
  1938. ") + WriteStallCondition(" +
  1939. std::to_string(static_cast<uint32_t>(condition)) +
  1940. ") is correctly mapped to a valid `InternalStats` or "
  1941. "bypass its check in this test";
  1942. }
  1943. } else if (cause != WriteStallCause::kCFScopeWriteStallCauseEnumMax &&
  1944. cause != WriteStallCause::kDBScopeWriteStallCauseEnumMax) {
  1945. ASSERT_TRUE(false) << "Please ensure the WriteStallCause(" +
  1946. std::to_string(static_cast<uint32_t>(cause)) +
  1947. ") is either CF-scope or DB-scope write "
  1948. "stall cause in enum `WriteStallCause`";
  1949. }
  1950. }
  1951. }
  1952. }
TEST_F(DBPropertiesTest, GetMapPropertyWriteStallStats) {
  // Coerces a DB-scope write stall (write buffer manager limit) and a
  // CF-scope write stall (memtable limit), and verifies the corresponding
  // `kDBWriteStallStats` / `kCFWriteStallStats` map properties change (or
  // stay at zero) as expected for each cause.
  Options options = CurrentOptions();
  CreateAndReopenWithCF({"heavy_write_cf"}, options);
  for (auto test_cause : {WriteStallCause::kWriteBufferManagerLimit,
                          WriteStallCause::kMemtableLimit}) {
    if (test_cause == WriteStallCause::kWriteBufferManagerLimit) {
      // Small (100000-byte) write buffer manager; third ctor arg presumably
      // enables stalling when the limit is hit — TODO confirm against the
      // WriteBufferManager ctor signature.
      options.write_buffer_manager.reset(
          new WriteBufferManager(100000, nullptr, true));
    } else if (test_cause == WriteStallCause::kMemtableLimit) {
      // Cap memtables at 2 and disable compactions so immutable memtables
      // can pile up and trigger a memtable-limit stall.
      options.max_write_buffer_number = 2;
      options.disable_auto_compactions = true;
    }
    ReopenWithColumnFamilies({"default", "heavy_write_cf"}, options);
    // Assert initial write stall stats are all 0
    std::map<std::string, std::string> db_values;
    ASSERT_TRUE(dbfull()->GetMapProperty(DB::Properties::kDBWriteStallStats,
                                         &db_values));
    ASSERT_EQ(std::stoi(db_values[WriteStallStatsMapKeys::CauseConditionCount(
                  WriteStallCause::kWriteBufferManagerLimit,
                  WriteStallCondition::kStopped)]),
              0);
    // Both CFs (default and heavy_write_cf) start with zero stops/delays.
    for (int cf = 0; cf <= 1; ++cf) {
      std::map<std::string, std::string> cf_values;
      ASSERT_TRUE(dbfull()->GetMapProperty(
          handles_[cf], DB::Properties::kCFWriteStallStats, &cf_values));
      ASSERT_EQ(std::stoi(cf_values[WriteStallStatsMapKeys::TotalStops()]), 0);
      ASSERT_EQ(std::stoi(cf_values[WriteStallStatsMapKeys::TotalDelays()]), 0);
    }
    // Pause flush thread to help coerce write stall
    std::unique_ptr<test::SleepingBackgroundTask> sleeping_task(
        new test::SleepingBackgroundTask());
    env_->SetBackgroundThreads(1, Env::HIGH);
    env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
                   sleeping_task.get(), Env::Priority::HIGH);
    sleeping_task->WaitUntilSleeping();
    // Coerce write stall
    if (test_cause == WriteStallCause::kWriteBufferManagerLimit) {
      // First write fills the write buffer manager; the second write, with
      // no_slowdown set, must fail Incomplete with a "Write stall" message
      // instead of blocking.
      ASSERT_OK(dbfull()->Put(
          WriteOptions(), handles_[1], Key(1),
          DummyString(options.write_buffer_manager->buffer_size())));
      WriteOptions wo;
      wo.no_slowdown = true;
      Status s = dbfull()->Put(
          wo, handles_[1], Key(2),
          DummyString(options.write_buffer_manager->buffer_size()));
      ASSERT_TRUE(s.IsIncomplete());
      ASSERT_TRUE(s.ToString().find("Write stall") != std::string::npos);
    } else if (test_cause == WriteStallCause::kMemtableLimit) {
      // With the flush thread asleep, two non-waiting flushes accumulate
      // immutable memtables up to max_write_buffer_number.
      FlushOptions fo;
      fo.allow_write_stall = true;
      fo.wait = false;
      ASSERT_OK(
          dbfull()->Put(WriteOptions(), handles_[1], Key(1), DummyString(1)));
      ASSERT_OK(dbfull()->Flush(fo, handles_[1]));
      ASSERT_OK(
          dbfull()->Put(WriteOptions(), handles_[1], Key(2), DummyString(1)));
      ASSERT_OK(dbfull()->Flush(fo, handles_[1]));
    }
    if (test_cause == WriteStallCause::kWriteBufferManagerLimit) {
      // DB-scope (WBM, stopped) count should now be exactly 1.
      db_values.clear();
      EXPECT_TRUE(dbfull()->GetMapProperty(DB::Properties::kDBWriteStallStats,
                                           &db_values));
      EXPECT_EQ(std::stoi(db_values[WriteStallStatsMapKeys::CauseConditionCount(
                    WriteStallCause::kWriteBufferManagerLimit,
                    WriteStallCondition::kStopped)]),
                1);
      // `WriteStallCause::kWriteBufferManagerLimit` should not result in any
      // CF-scope write stall stats changes
      for (int cf = 0; cf <= 1; ++cf) {
        std::map<std::string, std::string> cf_values;
        EXPECT_TRUE(dbfull()->GetMapProperty(
            handles_[cf], DB::Properties::kCFWriteStallStats, &cf_values));
        EXPECT_EQ(std::stoi(cf_values[WriteStallStatsMapKeys::TotalStops()]),
                  0);
        EXPECT_EQ(std::stoi(cf_values[WriteStallStatsMapKeys::TotalDelays()]),
                  0);
      }
    } else if (test_cause == WriteStallCause::kMemtableLimit) {
      // Only the stalled CF (cf == 1, heavy_write_cf) records a
      // memtable-limit stop; neither CF records any delays.
      for (int cf = 0; cf <= 1; ++cf) {
        std::map<std::string, std::string> cf_values;
        EXPECT_TRUE(dbfull()->GetMapProperty(
            handles_[cf], DB::Properties::kCFWriteStallStats, &cf_values));
        EXPECT_EQ(std::stoi(cf_values[WriteStallStatsMapKeys::TotalStops()]),
                  cf == 1 ? 1 : 0);
        EXPECT_EQ(
            std::stoi(cf_values[WriteStallStatsMapKeys::CauseConditionCount(
                WriteStallCause::kMemtableLimit,
                WriteStallCondition::kStopped)]),
            cf == 1 ? 1 : 0);
        EXPECT_EQ(std::stoi(cf_values[WriteStallStatsMapKeys::TotalDelays()]),
                  0);
        EXPECT_EQ(
            std::stoi(cf_values[WriteStallStatsMapKeys::CauseConditionCount(
                WriteStallCause::kMemtableLimit,
                WriteStallCondition::kDelayed)]),
            0);
      }
    }
    // Unblock the background thread before the next iteration / test end.
    sleeping_task->WakeUp();
    sleeping_task->WaitUntilDone();
  }
}
  2055. namespace {
  2056. std::string PopMetaIndexKey(InternalIterator* meta_iter) {
  2057. Status s = meta_iter->status();
  2058. if (!s.ok()) {
  2059. return s.ToString();
  2060. } else if (meta_iter->Valid()) {
  2061. std::string rv = meta_iter->key().ToString();
  2062. meta_iter->Next();
  2063. return rv;
  2064. } else {
  2065. return "NOT_FOUND";
  2066. }
  2067. }
  2068. } // anonymous namespace
TEST_F(DBPropertiesTest, TableMetaIndexKeys) {
  // This is to detect unexpected churn in metaindex block keys. This is more
  // of a "table test" but table_test.cc doesn't depend on db_test_util.h and
  // we need ChangeOptions() for broad coverage.
  constexpr int kKeyCount = 100;
  do {
    Options options;
    options = CurrentOptions(options);
    DestroyAndReopen(options);
    // Create an SST file
    for (int key = 0; key < kKeyCount; key++) {
      ASSERT_OK(Put(Key(key), "val"));
    }
    ASSERT_OK(Flush());
    // Find its file number
    std::vector<LiveFileMetaData> files;
    db_->GetLiveFilesMetaData(&files);
    // 1 SST file
    ASSERT_EQ(1, files.size());
    // Open it for inspection
    std::string sst_file =
        files[0].directory + "/" + files[0].relative_filename;
    std::unique_ptr<FSRandomAccessFile> f;
    ASSERT_OK(env_->GetFileSystem()->NewRandomAccessFile(
        sst_file, FileOptions(), &f, nullptr));
    std::unique_ptr<RandomAccessFileReader> r;
    r.reset(new RandomAccessFileReader(std::move(f), sst_file));
    uint64_t file_size = 0;
    ASSERT_OK(env_->GetFileSize(sst_file, &file_size));
    // Read metaindex
    BlockContents bc;
    const ReadOptions read_options;
    ASSERT_OK(ReadMetaIndexBlockInFile(
        r.get(), file_size, 0U, ImmutableOptions(options), read_options, &bc));
    Block metaindex_block(std::move(bc));
    std::unique_ptr<InternalIterator> meta_iter;
    meta_iter.reset(metaindex_block.NewMetaIterator());
    meta_iter->SeekToFirst();
    // For block-based tables, check the option-dependent keys (filter,
    // hash index metadata, inlined index) in their expected order before
    // the properties block.
    if (strcmp(options.table_factory->Name(),
               TableFactory::kBlockBasedTableName()) == 0) {
      auto bbto = options.table_factory->GetOptions<BlockBasedTableOptions>();
      if (bbto->filter_policy) {
        if (bbto->partition_filters) {
          // The key names are intentionally hard-coded here to detect
          // accidental regression on compatibility.
          EXPECT_EQ("partitionedfilter.rocksdb.BuiltinBloomFilter",
                    PopMetaIndexKey(meta_iter.get()));
        } else {
          EXPECT_EQ("fullfilter.rocksdb.BuiltinBloomFilter",
                    PopMetaIndexKey(meta_iter.get()));
        }
      }
      if (bbto->index_type == BlockBasedTableOptions::kHashSearch) {
        EXPECT_EQ("rocksdb.hashindex.metadata",
                  PopMetaIndexKey(meta_iter.get()));
        EXPECT_EQ("rocksdb.hashindex.prefixes",
                  PopMetaIndexKey(meta_iter.get()));
      }
      // format_version >= 6 adds an "rocksdb.index" metaindex entry.
      if (bbto->format_version >= 6) {
        EXPECT_EQ("rocksdb.index", PopMetaIndexKey(meta_iter.get()));
      }
    }
    // Properties block is always last; after it the iterator is exhausted.
    EXPECT_EQ("rocksdb.properties", PopMetaIndexKey(meta_iter.get()));
    EXPECT_EQ("NOT_FOUND", PopMetaIndexKey(meta_iter.get()));
  } while (ChangeOptions());
}
  2135. } // namespace ROCKSDB_NAMESPACE
int main(int argc, char** argv) {
  // Print a stack trace on crash to aid debugging test failures.
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}