internal_stats.cc

// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/internal_stats.h"

#include <algorithm>
#include <cinttypes>
#include <limits>
#include <string>
#include <utility>
#include <vector>

#include "db/column_family.h"
#include "db/db_impl/db_impl.h"
#include "table/block_based/block_based_table_factory.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

#ifndef ROCKSDB_LITE

const std::map<LevelStatType, LevelStat> InternalStats::compaction_level_stats =
    {
        {LevelStatType::NUM_FILES, LevelStat{"NumFiles", "Files"}},
        {LevelStatType::COMPACTED_FILES,
         LevelStat{"CompactedFiles", "CompactedFiles"}},
        {LevelStatType::SIZE_BYTES, LevelStat{"SizeBytes", "Size"}},
        {LevelStatType::SCORE, LevelStat{"Score", "Score"}},
        {LevelStatType::READ_GB, LevelStat{"ReadGB", "Read(GB)"}},
        {LevelStatType::RN_GB, LevelStat{"RnGB", "Rn(GB)"}},
        {LevelStatType::RNP1_GB, LevelStat{"Rnp1GB", "Rnp1(GB)"}},
        {LevelStatType::WRITE_GB, LevelStat{"WriteGB", "Write(GB)"}},
        {LevelStatType::W_NEW_GB, LevelStat{"WnewGB", "Wnew(GB)"}},
        {LevelStatType::MOVED_GB, LevelStat{"MovedGB", "Moved(GB)"}},
        {LevelStatType::WRITE_AMP, LevelStat{"WriteAmp", "W-Amp"}},
        {LevelStatType::READ_MBPS, LevelStat{"ReadMBps", "Rd(MB/s)"}},
        {LevelStatType::WRITE_MBPS, LevelStat{"WriteMBps", "Wr(MB/s)"}},
        {LevelStatType::COMP_SEC, LevelStat{"CompSec", "Comp(sec)"}},
        {LevelStatType::COMP_CPU_SEC,
         LevelStat{"CompMergeCPU", "CompMergeCPU(sec)"}},
        {LevelStatType::COMP_COUNT, LevelStat{"CompCount", "Comp(cnt)"}},
        {LevelStatType::AVG_SEC, LevelStat{"AvgSec", "Avg(sec)"}},
        {LevelStatType::KEY_IN, LevelStat{"KeyIn", "KeyIn"}},
        {LevelStatType::KEY_DROP, LevelStat{"KeyDrop", "KeyDrop"}},
};

namespace {

const double kMB = 1048576.0;
const double kGB = kMB * 1024;
const double kMicrosInSec = 1000000.0;

void PrintLevelStatsHeader(char* buf, size_t len, const std::string& cf_name,
                           const std::string& group_by) {
  int written_size =
      snprintf(buf, len, "\n** Compaction Stats [%s] **\n", cf_name.c_str());
  auto hdr = [](LevelStatType t) {
    return InternalStats::compaction_level_stats.at(t).header_name.c_str();
  };
  int line_size = snprintf(
      buf + written_size, len - written_size,
      "%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n",
      // Note that we skip COMPACTED_FILES and merge it with Files column
      group_by.c_str(), hdr(LevelStatType::NUM_FILES),
      hdr(LevelStatType::SIZE_BYTES), hdr(LevelStatType::SCORE),
      hdr(LevelStatType::READ_GB), hdr(LevelStatType::RN_GB),
      hdr(LevelStatType::RNP1_GB), hdr(LevelStatType::WRITE_GB),
      hdr(LevelStatType::W_NEW_GB), hdr(LevelStatType::MOVED_GB),
      hdr(LevelStatType::WRITE_AMP), hdr(LevelStatType::READ_MBPS),
      hdr(LevelStatType::WRITE_MBPS), hdr(LevelStatType::COMP_SEC),
      hdr(LevelStatType::COMP_CPU_SEC), hdr(LevelStatType::COMP_COUNT),
      hdr(LevelStatType::AVG_SEC), hdr(LevelStatType::KEY_IN),
      hdr(LevelStatType::KEY_DROP));
  written_size += line_size;
  snprintf(buf + written_size, len - written_size, "%s\n",
           std::string(line_size, '-').c_str());
}

void PrepareLevelStats(std::map<LevelStatType, double>* level_stats,
                       int num_files, int being_compacted,
                       double total_file_size, double score, double w_amp,
                       const InternalStats::CompactionStats& stats) {
  uint64_t bytes_read =
      stats.bytes_read_non_output_levels + stats.bytes_read_output_level;
  int64_t bytes_new = stats.bytes_written - stats.bytes_read_output_level;
  double elapsed = (stats.micros + 1) / kMicrosInSec;
  (*level_stats)[LevelStatType::NUM_FILES] = num_files;
  (*level_stats)[LevelStatType::COMPACTED_FILES] = being_compacted;
  (*level_stats)[LevelStatType::SIZE_BYTES] = total_file_size;
  (*level_stats)[LevelStatType::SCORE] = score;
  (*level_stats)[LevelStatType::READ_GB] = bytes_read / kGB;
  (*level_stats)[LevelStatType::RN_GB] =
      stats.bytes_read_non_output_levels / kGB;
  (*level_stats)[LevelStatType::RNP1_GB] = stats.bytes_read_output_level / kGB;
  (*level_stats)[LevelStatType::WRITE_GB] = stats.bytes_written / kGB;
  (*level_stats)[LevelStatType::W_NEW_GB] = bytes_new / kGB;
  (*level_stats)[LevelStatType::MOVED_GB] = stats.bytes_moved / kGB;
  (*level_stats)[LevelStatType::WRITE_AMP] = w_amp;
  (*level_stats)[LevelStatType::READ_MBPS] = bytes_read / kMB / elapsed;
  (*level_stats)[LevelStatType::WRITE_MBPS] =
      stats.bytes_written / kMB / elapsed;
  (*level_stats)[LevelStatType::COMP_SEC] = stats.micros / kMicrosInSec;
  (*level_stats)[LevelStatType::COMP_CPU_SEC] = stats.cpu_micros / kMicrosInSec;
  (*level_stats)[LevelStatType::COMP_COUNT] = stats.count;
  (*level_stats)[LevelStatType::AVG_SEC] =
      stats.count == 0 ? 0 : stats.micros / kMicrosInSec / stats.count;
  (*level_stats)[LevelStatType::KEY_IN] =
      static_cast<double>(stats.num_input_records);
  (*level_stats)[LevelStatType::KEY_DROP] =
      static_cast<double>(stats.num_dropped_records);
}
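
// A note on the derived values above (summary only, no new behavior): Wnew(GB)
// is bytes_written minus bytes_read_output_level, i.e. the net new data added
// at the output level, and `elapsed` uses stats.micros + 1 so the Rd(MB/s) and
// Wr(MB/s) rates never divide by zero for levels with no recorded activity.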

void PrintLevelStats(char* buf, size_t len, const std::string& name,
                     const std::map<LevelStatType, double>& stat_value) {
  snprintf(
      buf, len,
      "%4s " /* Level */
      "%6d/%-3d " /* Files */
      "%8s " /* Size */
      "%5.1f " /* Score */
      "%8.1f " /* Read(GB) */
      "%7.1f " /* Rn(GB) */
      "%8.1f " /* Rnp1(GB) */
      "%9.1f " /* Write(GB) */
      "%8.1f " /* Wnew(GB) */
      "%9.1f " /* Moved(GB) */
      "%5.1f " /* W-Amp */
      "%8.1f " /* Rd(MB/s) */
      "%8.1f " /* Wr(MB/s) */
      "%9.2f " /* Comp(sec) */
      "%17.2f " /* CompMergeCPU(sec) */
      "%9d " /* Comp(cnt) */
      "%8.3f " /* Avg(sec) */
      "%7s " /* KeyIn */
      "%6s\n", /* KeyDrop */
      name.c_str(), static_cast<int>(stat_value.at(LevelStatType::NUM_FILES)),
      static_cast<int>(stat_value.at(LevelStatType::COMPACTED_FILES)),
      BytesToHumanString(
          static_cast<uint64_t>(stat_value.at(LevelStatType::SIZE_BYTES)))
          .c_str(),
      stat_value.at(LevelStatType::SCORE),
      stat_value.at(LevelStatType::READ_GB),
      stat_value.at(LevelStatType::RN_GB),
      stat_value.at(LevelStatType::RNP1_GB),
      stat_value.at(LevelStatType::WRITE_GB),
      stat_value.at(LevelStatType::W_NEW_GB),
      stat_value.at(LevelStatType::MOVED_GB),
      stat_value.at(LevelStatType::WRITE_AMP),
      stat_value.at(LevelStatType::READ_MBPS),
      stat_value.at(LevelStatType::WRITE_MBPS),
      stat_value.at(LevelStatType::COMP_SEC),
      stat_value.at(LevelStatType::COMP_CPU_SEC),
      static_cast<int>(stat_value.at(LevelStatType::COMP_COUNT)),
      stat_value.at(LevelStatType::AVG_SEC),
      NumberToHumanString(
          static_cast<std::int64_t>(stat_value.at(LevelStatType::KEY_IN)))
          .c_str(),
      NumberToHumanString(
          static_cast<std::int64_t>(stat_value.at(LevelStatType::KEY_DROP)))
          .c_str());
}

void PrintLevelStats(char* buf, size_t len, const std::string& name,
                     int num_files, int being_compacted,
                     double total_file_size, double score, double w_amp,
                     const InternalStats::CompactionStats& stats) {
  std::map<LevelStatType, double> level_stats;
  PrepareLevelStats(&level_stats, num_files, being_compacted, total_file_size,
                    score, w_amp, stats);
  PrintLevelStats(buf, len, name, level_stats);
}
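
// Illustrative output of the helpers above (values are made up; the real
// column widths come from the format strings in PrintLevelStats):
//
//   ** Compaction Stats [default] **
//   Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) ...
//   ----------------------------------------------------------------------
//     L0   2/0  64.50 MB  0.5   0.0   0.0   0.0   0.1   0.1   0.0   1.0 ...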

// Assumes that trailing numbers represent an optional argument. This requires
// property names to not end with numbers.
std::pair<Slice, Slice> GetPropertyNameAndArg(const Slice& property) {
  Slice name = property, arg = property;
  size_t sfx_len = 0;
  while (sfx_len < property.size() &&
         isdigit(property[property.size() - sfx_len - 1])) {
    ++sfx_len;
  }
  name.remove_suffix(sfx_len);
  arg.remove_prefix(property.size() - sfx_len);
  return {name, arg};
}
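
// For example, "rocksdb.num-files-at-level5" splits into the name
// "rocksdb.num-files-at-level" and the argument "5", while a property with no
// trailing digits (e.g. "rocksdb.stats") yields an empty argument slice.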

}  // anonymous namespace

static const std::string rocksdb_prefix = "rocksdb.";
static const std::string num_files_at_level_prefix = "num-files-at-level";
static const std::string compression_ratio_at_level_prefix =
    "compression-ratio-at-level";
static const std::string allstats = "stats";
static const std::string sstables = "sstables";
static const std::string cfstats = "cfstats";
static const std::string cfstats_no_file_histogram =
    "cfstats-no-file-histogram";
static const std::string cf_file_histogram = "cf-file-histogram";
static const std::string dbstats = "dbstats";
static const std::string levelstats = "levelstats";
static const std::string num_immutable_mem_table = "num-immutable-mem-table";
static const std::string num_immutable_mem_table_flushed =
    "num-immutable-mem-table-flushed";
static const std::string mem_table_flush_pending = "mem-table-flush-pending";
static const std::string compaction_pending = "compaction-pending";
static const std::string background_errors = "background-errors";
static const std::string cur_size_active_mem_table =
    "cur-size-active-mem-table";
static const std::string cur_size_all_mem_tables = "cur-size-all-mem-tables";
static const std::string size_all_mem_tables = "size-all-mem-tables";
static const std::string num_entries_active_mem_table =
    "num-entries-active-mem-table";
static const std::string num_entries_imm_mem_tables =
    "num-entries-imm-mem-tables";
static const std::string num_deletes_active_mem_table =
    "num-deletes-active-mem-table";
static const std::string num_deletes_imm_mem_tables =
    "num-deletes-imm-mem-tables";
static const std::string estimate_num_keys = "estimate-num-keys";
static const std::string estimate_table_readers_mem =
    "estimate-table-readers-mem";
static const std::string is_file_deletions_enabled =
    "is-file-deletions-enabled";
static const std::string num_snapshots = "num-snapshots";
static const std::string oldest_snapshot_time = "oldest-snapshot-time";
static const std::string oldest_snapshot_sequence = "oldest-snapshot-sequence";
static const std::string num_live_versions = "num-live-versions";
static const std::string current_version_number =
    "current-super-version-number";
static const std::string estimate_live_data_size = "estimate-live-data-size";
static const std::string min_log_number_to_keep_str = "min-log-number-to-keep";
static const std::string min_obsolete_sst_number_to_keep_str =
    "min-obsolete-sst-number-to-keep";
static const std::string base_level_str = "base-level";
static const std::string total_sst_files_size = "total-sst-files-size";
static const std::string live_sst_files_size = "live-sst-files-size";
static const std::string estimate_pending_comp_bytes =
    "estimate-pending-compaction-bytes";
static const std::string aggregated_table_properties =
    "aggregated-table-properties";
static const std::string aggregated_table_properties_at_level =
    aggregated_table_properties + "-at-level";
static const std::string num_running_compactions = "num-running-compactions";
static const std::string num_running_flushes = "num-running-flushes";
static const std::string actual_delayed_write_rate =
    "actual-delayed-write-rate";
static const std::string is_write_stopped = "is-write-stopped";
static const std::string estimate_oldest_key_time = "estimate-oldest-key-time";
static const std::string block_cache_capacity = "block-cache-capacity";
static const std::string block_cache_usage = "block-cache-usage";
static const std::string block_cache_pinned_usage = "block-cache-pinned-usage";
static const std::string options_statistics = "options-statistics";

const std::string DB::Properties::kNumFilesAtLevelPrefix =
    rocksdb_prefix + num_files_at_level_prefix;
const std::string DB::Properties::kCompressionRatioAtLevelPrefix =
    rocksdb_prefix + compression_ratio_at_level_prefix;
const std::string DB::Properties::kStats = rocksdb_prefix + allstats;
const std::string DB::Properties::kSSTables = rocksdb_prefix + sstables;
const std::string DB::Properties::kCFStats = rocksdb_prefix + cfstats;
const std::string DB::Properties::kCFStatsNoFileHistogram =
    rocksdb_prefix + cfstats_no_file_histogram;
const std::string DB::Properties::kCFFileHistogram =
    rocksdb_prefix + cf_file_histogram;
const std::string DB::Properties::kDBStats = rocksdb_prefix + dbstats;
const std::string DB::Properties::kLevelStats = rocksdb_prefix + levelstats;
const std::string DB::Properties::kNumImmutableMemTable =
    rocksdb_prefix + num_immutable_mem_table;
const std::string DB::Properties::kNumImmutableMemTableFlushed =
    rocksdb_prefix + num_immutable_mem_table_flushed;
const std::string DB::Properties::kMemTableFlushPending =
    rocksdb_prefix + mem_table_flush_pending;
const std::string DB::Properties::kCompactionPending =
    rocksdb_prefix + compaction_pending;
const std::string DB::Properties::kNumRunningCompactions =
    rocksdb_prefix + num_running_compactions;
const std::string DB::Properties::kNumRunningFlushes =
    rocksdb_prefix + num_running_flushes;
const std::string DB::Properties::kBackgroundErrors =
    rocksdb_prefix + background_errors;
const std::string DB::Properties::kCurSizeActiveMemTable =
    rocksdb_prefix + cur_size_active_mem_table;
const std::string DB::Properties::kCurSizeAllMemTables =
    rocksdb_prefix + cur_size_all_mem_tables;
const std::string DB::Properties::kSizeAllMemTables =
    rocksdb_prefix + size_all_mem_tables;
const std::string DB::Properties::kNumEntriesActiveMemTable =
    rocksdb_prefix + num_entries_active_mem_table;
const std::string DB::Properties::kNumEntriesImmMemTables =
    rocksdb_prefix + num_entries_imm_mem_tables;
const std::string DB::Properties::kNumDeletesActiveMemTable =
    rocksdb_prefix + num_deletes_active_mem_table;
const std::string DB::Properties::kNumDeletesImmMemTables =
    rocksdb_prefix + num_deletes_imm_mem_tables;
const std::string DB::Properties::kEstimateNumKeys =
    rocksdb_prefix + estimate_num_keys;
const std::string DB::Properties::kEstimateTableReadersMem =
    rocksdb_prefix + estimate_table_readers_mem;
const std::string DB::Properties::kIsFileDeletionsEnabled =
    rocksdb_prefix + is_file_deletions_enabled;
const std::string DB::Properties::kNumSnapshots =
    rocksdb_prefix + num_snapshots;
const std::string DB::Properties::kOldestSnapshotTime =
    rocksdb_prefix + oldest_snapshot_time;
const std::string DB::Properties::kOldestSnapshotSequence =
    rocksdb_prefix + oldest_snapshot_sequence;
const std::string DB::Properties::kNumLiveVersions =
    rocksdb_prefix + num_live_versions;
const std::string DB::Properties::kCurrentSuperVersionNumber =
    rocksdb_prefix + current_version_number;
const std::string DB::Properties::kEstimateLiveDataSize =
    rocksdb_prefix + estimate_live_data_size;
const std::string DB::Properties::kMinLogNumberToKeep =
    rocksdb_prefix + min_log_number_to_keep_str;
const std::string DB::Properties::kMinObsoleteSstNumberToKeep =
    rocksdb_prefix + min_obsolete_sst_number_to_keep_str;
const std::string DB::Properties::kTotalSstFilesSize =
    rocksdb_prefix + total_sst_files_size;
const std::string DB::Properties::kLiveSstFilesSize =
    rocksdb_prefix + live_sst_files_size;
const std::string DB::Properties::kBaseLevel = rocksdb_prefix + base_level_str;
const std::string DB::Properties::kEstimatePendingCompactionBytes =
    rocksdb_prefix + estimate_pending_comp_bytes;
const std::string DB::Properties::kAggregatedTableProperties =
    rocksdb_prefix + aggregated_table_properties;
const std::string DB::Properties::kAggregatedTablePropertiesAtLevel =
    rocksdb_prefix + aggregated_table_properties_at_level;
const std::string DB::Properties::kActualDelayedWriteRate =
    rocksdb_prefix + actual_delayed_write_rate;
const std::string DB::Properties::kIsWriteStopped =
    rocksdb_prefix + is_write_stopped;
const std::string DB::Properties::kEstimateOldestKeyTime =
    rocksdb_prefix + estimate_oldest_key_time;
const std::string DB::Properties::kBlockCacheCapacity =
    rocksdb_prefix + block_cache_capacity;
const std::string DB::Properties::kBlockCacheUsage =
    rocksdb_prefix + block_cache_usage;
const std::string DB::Properties::kBlockCachePinnedUsage =
    rocksdb_prefix + block_cache_pinned_usage;
const std::string DB::Properties::kOptionsStatistics =
    rocksdb_prefix + options_statistics;
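
// A minimal usage sketch (illustrative; assumes an already-open DB* named db).
// These public names are what callers pass to the property getters:
//
//   std::string stats;
//   db->GetProperty(DB::Properties::kCFStats, &stats);
//   uint64_t num_keys = 0;
//   db->GetIntProperty(DB::Properties::kEstimateNumKeys, &num_keys);
//
// String-valued properties are read with GetProperty(); integer-valued ones
// can also be read with GetIntProperty(), which routes to the handle_int
// handlers registered in the table below.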

const std::unordered_map<std::string, DBPropertyInfo>
    InternalStats::ppt_name_to_info = {
        {DB::Properties::kNumFilesAtLevelPrefix,
         {false, &InternalStats::HandleNumFilesAtLevel, nullptr, nullptr,
          nullptr}},
        {DB::Properties::kCompressionRatioAtLevelPrefix,
         {false, &InternalStats::HandleCompressionRatioAtLevelPrefix, nullptr,
          nullptr, nullptr}},
        {DB::Properties::kLevelStats,
         {false, &InternalStats::HandleLevelStats, nullptr, nullptr, nullptr}},
        {DB::Properties::kStats,
         {false, &InternalStats::HandleStats, nullptr, nullptr, nullptr}},
        {DB::Properties::kCFStats,
         {false, &InternalStats::HandleCFStats, nullptr,
          &InternalStats::HandleCFMapStats, nullptr}},
        {DB::Properties::kCFStatsNoFileHistogram,
         {false, &InternalStats::HandleCFStatsNoFileHistogram, nullptr, nullptr,
          nullptr}},
        {DB::Properties::kCFFileHistogram,
         {false, &InternalStats::HandleCFFileHistogram, nullptr, nullptr,
          nullptr}},
        {DB::Properties::kDBStats,
         {false, &InternalStats::HandleDBStats, nullptr, nullptr, nullptr}},
        {DB::Properties::kSSTables,
         {false, &InternalStats::HandleSsTables, nullptr, nullptr, nullptr}},
        {DB::Properties::kAggregatedTableProperties,
         {false, &InternalStats::HandleAggregatedTableProperties, nullptr,
          nullptr, nullptr}},
        {DB::Properties::kAggregatedTablePropertiesAtLevel,
         {false, &InternalStats::HandleAggregatedTablePropertiesAtLevel,
          nullptr, nullptr, nullptr}},
        {DB::Properties::kNumImmutableMemTable,
         {false, nullptr, &InternalStats::HandleNumImmutableMemTable, nullptr,
          nullptr}},
        {DB::Properties::kNumImmutableMemTableFlushed,
         {false, nullptr, &InternalStats::HandleNumImmutableMemTableFlushed,
          nullptr, nullptr}},
        {DB::Properties::kMemTableFlushPending,
         {false, nullptr, &InternalStats::HandleMemTableFlushPending, nullptr,
          nullptr}},
        {DB::Properties::kCompactionPending,
         {false, nullptr, &InternalStats::HandleCompactionPending, nullptr,
          nullptr}},
        {DB::Properties::kBackgroundErrors,
         {false, nullptr, &InternalStats::HandleBackgroundErrors, nullptr,
          nullptr}},
        {DB::Properties::kCurSizeActiveMemTable,
         {false, nullptr, &InternalStats::HandleCurSizeActiveMemTable, nullptr,
          nullptr}},
        {DB::Properties::kCurSizeAllMemTables,
         {false, nullptr, &InternalStats::HandleCurSizeAllMemTables, nullptr,
          nullptr}},
        {DB::Properties::kSizeAllMemTables,
         {false, nullptr, &InternalStats::HandleSizeAllMemTables, nullptr,
          nullptr}},
        {DB::Properties::kNumEntriesActiveMemTable,
         {false, nullptr, &InternalStats::HandleNumEntriesActiveMemTable,
          nullptr, nullptr}},
        {DB::Properties::kNumEntriesImmMemTables,
         {false, nullptr, &InternalStats::HandleNumEntriesImmMemTables, nullptr,
          nullptr}},
        {DB::Properties::kNumDeletesActiveMemTable,
         {false, nullptr, &InternalStats::HandleNumDeletesActiveMemTable,
          nullptr, nullptr}},
        {DB::Properties::kNumDeletesImmMemTables,
         {false, nullptr, &InternalStats::HandleNumDeletesImmMemTables, nullptr,
          nullptr}},
        {DB::Properties::kEstimateNumKeys,
         {false, nullptr, &InternalStats::HandleEstimateNumKeys, nullptr,
          nullptr}},
        {DB::Properties::kEstimateTableReadersMem,
         {true, nullptr, &InternalStats::HandleEstimateTableReadersMem, nullptr,
          nullptr}},
        {DB::Properties::kIsFileDeletionsEnabled,
         {false, nullptr, &InternalStats::HandleIsFileDeletionsEnabled, nullptr,
          nullptr}},
        {DB::Properties::kNumSnapshots,
         {false, nullptr, &InternalStats::HandleNumSnapshots, nullptr,
          nullptr}},
        {DB::Properties::kOldestSnapshotTime,
         {false, nullptr, &InternalStats::HandleOldestSnapshotTime, nullptr,
          nullptr}},
        {DB::Properties::kOldestSnapshotSequence,
         {false, nullptr, &InternalStats::HandleOldestSnapshotSequence, nullptr,
          nullptr}},
        {DB::Properties::kNumLiveVersions,
         {false, nullptr, &InternalStats::HandleNumLiveVersions, nullptr,
          nullptr}},
        {DB::Properties::kCurrentSuperVersionNumber,
         {false, nullptr, &InternalStats::HandleCurrentSuperVersionNumber,
          nullptr, nullptr}},
        {DB::Properties::kEstimateLiveDataSize,
         {true, nullptr, &InternalStats::HandleEstimateLiveDataSize, nullptr,
          nullptr}},
        {DB::Properties::kMinLogNumberToKeep,
         {false, nullptr, &InternalStats::HandleMinLogNumberToKeep, nullptr,
          nullptr}},
        {DB::Properties::kMinObsoleteSstNumberToKeep,
         {false, nullptr, &InternalStats::HandleMinObsoleteSstNumberToKeep,
          nullptr, nullptr}},
        {DB::Properties::kBaseLevel,
         {false, nullptr, &InternalStats::HandleBaseLevel, nullptr, nullptr}},
        {DB::Properties::kTotalSstFilesSize,
         {false, nullptr, &InternalStats::HandleTotalSstFilesSize, nullptr,
          nullptr}},
        {DB::Properties::kLiveSstFilesSize,
         {false, nullptr, &InternalStats::HandleLiveSstFilesSize, nullptr,
          nullptr}},
        {DB::Properties::kEstimatePendingCompactionBytes,
         {false, nullptr, &InternalStats::HandleEstimatePendingCompactionBytes,
          nullptr, nullptr}},
        {DB::Properties::kNumRunningFlushes,
         {false, nullptr, &InternalStats::HandleNumRunningFlushes, nullptr,
          nullptr}},
        {DB::Properties::kNumRunningCompactions,
         {false, nullptr, &InternalStats::HandleNumRunningCompactions, nullptr,
          nullptr}},
        {DB::Properties::kActualDelayedWriteRate,
         {false, nullptr, &InternalStats::HandleActualDelayedWriteRate, nullptr,
          nullptr}},
        {DB::Properties::kIsWriteStopped,
         {false, nullptr, &InternalStats::HandleIsWriteStopped, nullptr,
          nullptr}},
        {DB::Properties::kEstimateOldestKeyTime,
         {false, nullptr, &InternalStats::HandleEstimateOldestKeyTime, nullptr,
          nullptr}},
        {DB::Properties::kBlockCacheCapacity,
         {false, nullptr, &InternalStats::HandleBlockCacheCapacity, nullptr,
          nullptr}},
        {DB::Properties::kBlockCacheUsage,
         {false, nullptr, &InternalStats::HandleBlockCacheUsage, nullptr,
          nullptr}},
        {DB::Properties::kBlockCachePinnedUsage,
         {false, nullptr, &InternalStats::HandleBlockCachePinnedUsage, nullptr,
          nullptr}},
        {DB::Properties::kOptionsStatistics,
         {false, nullptr, nullptr, nullptr,
          &DBImpl::GetPropertyHandleOptionsStatistics}},
};

const DBPropertyInfo* GetPropertyInfo(const Slice& property) {
  std::string ppt_name = GetPropertyNameAndArg(property).first.ToString();
  auto ppt_info_iter = InternalStats::ppt_name_to_info.find(ppt_name);
  if (ppt_info_iter == InternalStats::ppt_name_to_info.end()) {
    return nullptr;
  }
  return &ppt_info_iter->second;
}
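
// Dispatch sketch (a summary of how the table above is consumed, not a new
// API): a property lookup first resolves the DBPropertyInfo via
// GetPropertyInfo(), then calls the accessor matching the non-null handler:
//
//   const DBPropertyInfo* info = GetPropertyInfo(property);
//   if (info == nullptr)           -> unknown property
//   else if (info->handle_string)  -> InternalStats::GetStringProperty
//   else if (info->handle_map)     -> InternalStats::GetMapProperty
//   else if (info->handle_int)     -> GetIntProperty, or
//                                     GetIntPropertyOutOfMutex when
//                                     need_out_of_mutex is set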

bool InternalStats::GetStringProperty(const DBPropertyInfo& property_info,
                                      const Slice& property,
                                      std::string* value) {
  assert(value != nullptr);
  assert(property_info.handle_string != nullptr);
  Slice arg = GetPropertyNameAndArg(property).second;
  return (this->*(property_info.handle_string))(value, arg);
}

bool InternalStats::GetMapProperty(const DBPropertyInfo& property_info,
                                   const Slice& /*property*/,
                                   std::map<std::string, std::string>* value) {
  assert(value != nullptr);
  assert(property_info.handle_map != nullptr);
  return (this->*(property_info.handle_map))(value);
}

bool InternalStats::GetIntProperty(const DBPropertyInfo& property_info,
                                   uint64_t* value, DBImpl* db) {
  assert(value != nullptr);
  assert(property_info.handle_int != nullptr &&
         !property_info.need_out_of_mutex);
  db->mutex_.AssertHeld();
  return (this->*(property_info.handle_int))(value, db, nullptr /* version */);
}

bool InternalStats::GetIntPropertyOutOfMutex(
    const DBPropertyInfo& property_info, Version* version, uint64_t* value) {
  assert(value != nullptr);
  assert(property_info.handle_int != nullptr &&
         property_info.need_out_of_mutex);
  return (this->*(property_info.handle_int))(value, nullptr /* db */, version);
}

bool InternalStats::HandleNumFilesAtLevel(std::string* value, Slice suffix) {
  uint64_t level;
  const auto* vstorage = cfd_->current()->storage_info();
  bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty();
  if (!ok || static_cast<int>(level) >= number_levels_) {
    return false;
  } else {
    char buf[100];
    snprintf(buf, sizeof(buf), "%d",
             vstorage->NumLevelFiles(static_cast<int>(level)));
    *value = buf;
    return true;
  }
}

bool InternalStats::HandleCompressionRatioAtLevelPrefix(std::string* value,
                                                        Slice suffix) {
  uint64_t level;
  const auto* vstorage = cfd_->current()->storage_info();
  bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty();
  if (!ok || level >= static_cast<uint64_t>(number_levels_)) {
    return false;
  }
  *value = ToString(
      vstorage->GetEstimatedCompressionRatioAtLevel(static_cast<int>(level)));
  return true;
}

bool InternalStats::HandleLevelStats(std::string* value, Slice /*suffix*/) {
  char buf[1000];
  const auto* vstorage = cfd_->current()->storage_info();
  snprintf(buf, sizeof(buf),
           "Level Files Size(MB)\n"
           "--------------------\n");
  value->append(buf);
  for (int level = 0; level < number_levels_; level++) {
    snprintf(buf, sizeof(buf), "%3d %8d %8.0f\n", level,
             vstorage->NumLevelFiles(level),
             vstorage->NumLevelBytes(level) / kMB);
    value->append(buf);
  }
  return true;
}

bool InternalStats::HandleStats(std::string* value, Slice suffix) {
  if (!HandleCFStats(value, suffix)) {
    return false;
  }
  if (!HandleDBStats(value, suffix)) {
    return false;
  }
  return true;
}

bool InternalStats::HandleCFMapStats(
    std::map<std::string, std::string>* cf_stats) {
  DumpCFMapStats(cf_stats);
  return true;
}

bool InternalStats::HandleCFStats(std::string* value, Slice /*suffix*/) {
  DumpCFStats(value);
  return true;
}

bool InternalStats::HandleCFStatsNoFileHistogram(std::string* value,
                                                 Slice /*suffix*/) {
  DumpCFStatsNoFileHistogram(value);
  return true;
}

bool InternalStats::HandleCFFileHistogram(std::string* value,
                                          Slice /*suffix*/) {
  DumpCFFileHistogram(value);
  return true;
}

bool InternalStats::HandleDBStats(std::string* value, Slice /*suffix*/) {
  DumpDBStats(value);
  return true;
}

bool InternalStats::HandleSsTables(std::string* value, Slice /*suffix*/) {
  auto* current = cfd_->current();
  *value = current->DebugString(true, true);
  return true;
}

bool InternalStats::HandleAggregatedTableProperties(std::string* value,
                                                    Slice /*suffix*/) {
  std::shared_ptr<const TableProperties> tp;
  auto s = cfd_->current()->GetAggregatedTableProperties(&tp);
  if (!s.ok()) {
    return false;
  }
  *value = tp->ToString();
  return true;
}

bool InternalStats::HandleAggregatedTablePropertiesAtLevel(std::string* value,
                                                           Slice suffix) {
  uint64_t level;
  bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty();
  if (!ok || static_cast<int>(level) >= number_levels_) {
    return false;
  }
  std::shared_ptr<const TableProperties> tp;
  auto s = cfd_->current()->GetAggregatedTableProperties(
      &tp, static_cast<int>(level));
  if (!s.ok()) {
    return false;
  }
  *value = tp->ToString();
  return true;
}

bool InternalStats::HandleNumImmutableMemTable(uint64_t* value, DBImpl* /*db*/,
                                               Version* /*version*/) {
  *value = cfd_->imm()->NumNotFlushed();
  return true;
}

bool InternalStats::HandleNumImmutableMemTableFlushed(uint64_t* value,
                                                      DBImpl* /*db*/,
                                                      Version* /*version*/) {
  *value = cfd_->imm()->NumFlushed();
  return true;
}

bool InternalStats::HandleMemTableFlushPending(uint64_t* value, DBImpl* /*db*/,
                                               Version* /*version*/) {
  *value = (cfd_->imm()->IsFlushPending() ? 1 : 0);
  return true;
}

bool InternalStats::HandleNumRunningFlushes(uint64_t* value, DBImpl* db,
                                            Version* /*version*/) {
  *value = db->num_running_flushes();
  return true;
}

bool InternalStats::HandleCompactionPending(uint64_t* value, DBImpl* /*db*/,
                                            Version* /*version*/) {
  // 1 if the system has already determined that at least one compaction is
  // needed; 0 otherwise.
  const auto* vstorage = cfd_->current()->storage_info();
  *value = (cfd_->compaction_picker()->NeedsCompaction(vstorage) ? 1 : 0);
  return true;
}

bool InternalStats::HandleNumRunningCompactions(uint64_t* value, DBImpl* db,
                                                Version* /*version*/) {
  *value = db->num_running_compactions_;
  return true;
}

bool InternalStats::HandleBackgroundErrors(uint64_t* value, DBImpl* /*db*/,
                                           Version* /*version*/) {
  // Accumulated number of errors in background flushes or compactions.
  *value = GetBackgroundErrorCount();
  return true;
}

bool InternalStats::HandleCurSizeActiveMemTable(uint64_t* value, DBImpl* /*db*/,
                                                Version* /*version*/) {
  // Current size of the active memtable
  *value = cfd_->mem()->ApproximateMemoryUsage();
  return true;
}

bool InternalStats::HandleCurSizeAllMemTables(uint64_t* value, DBImpl* /*db*/,
                                              Version* /*version*/) {
  // Current size of the active memtable + immutable memtables
  *value = cfd_->mem()->ApproximateMemoryUsage() +
           cfd_->imm()->ApproximateUnflushedMemTablesMemoryUsage();
  return true;
}

bool InternalStats::HandleSizeAllMemTables(uint64_t* value, DBImpl* /*db*/,
                                           Version* /*version*/) {
  *value = cfd_->mem()->ApproximateMemoryUsage() +
           cfd_->imm()->ApproximateMemoryUsage();
  return true;
}

bool InternalStats::HandleNumEntriesActiveMemTable(uint64_t* value,
                                                   DBImpl* /*db*/,
                                                   Version* /*version*/) {
  // Current number of entries in the active memtable
  *value = cfd_->mem()->num_entries();
  return true;
}

bool InternalStats::HandleNumEntriesImmMemTables(uint64_t* value,
                                                 DBImpl* /*db*/,
                                                 Version* /*version*/) {
  // Current number of entries in the immutable memtables
  *value = cfd_->imm()->current()->GetTotalNumEntries();
  return true;
}

bool InternalStats::HandleNumDeletesActiveMemTable(uint64_t* value,
                                                   DBImpl* /*db*/,
                                                   Version* /*version*/) {
  // Current number of delete entries in the active memtable
  *value = cfd_->mem()->num_deletes();
  return true;
}

bool InternalStats::HandleNumDeletesImmMemTables(uint64_t* value,
                                                 DBImpl* /*db*/,
                                                 Version* /*version*/) {
  // Current number of delete entries in the immutable memtables
  *value = cfd_->imm()->current()->GetTotalNumDeletes();
  return true;
}

bool InternalStats::HandleEstimateNumKeys(uint64_t* value, DBImpl* /*db*/,
                                          Version* /*version*/) {
  // Estimate number of entries in the column family:
  // Use estimated entries in tables + total entries in memtables.
  const auto* vstorage = cfd_->current()->storage_info();
  uint64_t estimate_keys = cfd_->mem()->num_entries() +
                           cfd_->imm()->current()->GetTotalNumEntries() +
                           vstorage->GetEstimatedActiveKeys();
  uint64_t estimate_deletes =
      cfd_->mem()->num_deletes() + cfd_->imm()->current()->GetTotalNumDeletes();
  *value = estimate_keys > estimate_deletes * 2
               ? estimate_keys - (estimate_deletes * 2)
               : 0;
  return true;
}
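
// Why the 2x above: a delete is itself counted as an entry and is also
// expected to cancel one existing key, so each delete removes roughly two
// entries from the estimate. For example, 1,000 total entries with 100 deletes
// gives an estimate of 1,000 - 2 * 100 = 800 keys, clamped at 0 when deletes
// dominate. This is a heuristic reading of the arithmetic, not an exact count.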

bool InternalStats::HandleNumSnapshots(uint64_t* value, DBImpl* db,
                                       Version* /*version*/) {
  *value = db->snapshots().count();
  return true;
}

bool InternalStats::HandleOldestSnapshotTime(uint64_t* value, DBImpl* db,
                                             Version* /*version*/) {
  *value = static_cast<uint64_t>(db->snapshots().GetOldestSnapshotTime());
  return true;
}

bool InternalStats::HandleOldestSnapshotSequence(uint64_t* value, DBImpl* db,
                                                 Version* /*version*/) {
  *value = static_cast<uint64_t>(db->snapshots().GetOldestSnapshotSequence());
  return true;
}

bool InternalStats::HandleNumLiveVersions(uint64_t* value, DBImpl* /*db*/,
                                          Version* /*version*/) {
  *value = cfd_->GetNumLiveVersions();
  return true;
}

bool InternalStats::HandleCurrentSuperVersionNumber(uint64_t* value,
                                                    DBImpl* /*db*/,
                                                    Version* /*version*/) {
  *value = cfd_->GetSuperVersionNumber();
  return true;
}

bool InternalStats::HandleIsFileDeletionsEnabled(uint64_t* value, DBImpl* db,
                                                 Version* /*version*/) {
  *value = db->IsFileDeletionsEnabled();
  return true;
}

bool InternalStats::HandleBaseLevel(uint64_t* value, DBImpl* /*db*/,
                                    Version* /*version*/) {
  const auto* vstorage = cfd_->current()->storage_info();
  *value = vstorage->base_level();
  return true;
}

bool InternalStats::HandleTotalSstFilesSize(uint64_t* value, DBImpl* /*db*/,
                                            Version* /*version*/) {
  *value = cfd_->GetTotalSstFilesSize();
  return true;
}

bool InternalStats::HandleLiveSstFilesSize(uint64_t* value, DBImpl* /*db*/,
                                           Version* /*version*/) {
  *value = cfd_->GetLiveSstFilesSize();
  return true;
}

bool InternalStats::HandleEstimatePendingCompactionBytes(uint64_t* value,
                                                         DBImpl* /*db*/,
                                                         Version* /*version*/) {
  const auto* vstorage = cfd_->current()->storage_info();
  *value = vstorage->estimated_compaction_needed_bytes();
  return true;
}

bool InternalStats::HandleEstimateTableReadersMem(uint64_t* value,
                                                  DBImpl* /*db*/,
                                                  Version* version) {
  *value = (version == nullptr) ? 0 : version->GetMemoryUsageByTableReaders();
  return true;
}

bool InternalStats::HandleEstimateLiveDataSize(uint64_t* value, DBImpl* /*db*/,
                                               Version* version) {
  const auto* vstorage = version->storage_info();
  *value = vstorage->EstimateLiveDataSize();
  return true;
}

bool InternalStats::HandleMinLogNumberToKeep(uint64_t* value, DBImpl* db,
                                             Version* /*version*/) {
  *value = db->MinLogNumberToKeep();
  return true;
}

bool InternalStats::HandleMinObsoleteSstNumberToKeep(uint64_t* value,
                                                     DBImpl* db,
                                                     Version* /*version*/) {
  *value = db->MinObsoleteSstNumberToKeep();
  return true;
}

bool InternalStats::HandleActualDelayedWriteRate(uint64_t* value, DBImpl* db,
                                                 Version* /*version*/) {
  const WriteController& wc = db->write_controller();
  if (!wc.NeedsDelay()) {
    *value = 0;
  } else {
    *value = wc.delayed_write_rate();
  }
  return true;
}

bool InternalStats::HandleIsWriteStopped(uint64_t* value, DBImpl* db,
                                         Version* /*version*/) {
  *value = db->write_controller().IsStopped() ? 1 : 0;
  return true;
}

bool InternalStats::HandleEstimateOldestKeyTime(uint64_t* value, DBImpl* /*db*/,
                                                Version* /*version*/) {
  // TODO(yiwu): The property is currently available for fifo compaction
  // with allow_compaction = false. This is because we don't propagate
  // oldest_key_time on compaction.
  if (cfd_->ioptions()->compaction_style != kCompactionStyleFIFO ||
      cfd_->GetCurrentMutableCFOptions()
          ->compaction_options_fifo.allow_compaction) {
    return false;
  }
  TablePropertiesCollection collection;
  auto s = cfd_->current()->GetPropertiesOfAllTables(&collection);
  if (!s.ok()) {
    return false;
  }
  *value = std::numeric_limits<uint64_t>::max();
  for (auto& p : collection) {
    *value = std::min(*value, p.second->oldest_key_time);
    if (*value == 0) {
      break;
    }
  }
  if (*value > 0) {
    *value = std::min({cfd_->mem()->ApproximateOldestKeyTime(),
                       cfd_->imm()->ApproximateOldestKeyTime(), *value});
  }
  return *value > 0 && *value < std::numeric_limits<uint64_t>::max();
}

bool InternalStats::HandleBlockCacheStat(Cache** block_cache) {
  assert(block_cache != nullptr);
  auto* table_factory = cfd_->ioptions()->table_factory;
  assert(table_factory != nullptr);
  if (BlockBasedTableFactory::kName != table_factory->Name()) {
    return false;
  }
  auto* table_options =
      reinterpret_cast<BlockBasedTableOptions*>(table_factory->GetOptions());
  if (table_options == nullptr) {
    return false;
  }
  *block_cache = table_options->block_cache.get();
  if (table_options->no_block_cache || *block_cache == nullptr) {
    return false;
  }
  return true;
}
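
// The three block-cache properties below all share HandleBlockCacheStat(),
// which only succeeds when this column family uses BlockBasedTableFactory with
// a non-null, enabled block cache; otherwise the handlers return false and the
// property lookup reports failure to the caller.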

bool InternalStats::HandleBlockCacheCapacity(uint64_t* value, DBImpl* /*db*/,
                                             Version* /*version*/) {
  Cache* block_cache;
  bool ok = HandleBlockCacheStat(&block_cache);
  if (!ok) {
    return false;
  }
  *value = static_cast<uint64_t>(block_cache->GetCapacity());
  return true;
}

bool InternalStats::HandleBlockCacheUsage(uint64_t* value, DBImpl* /*db*/,
                                          Version* /*version*/) {
  Cache* block_cache;
  bool ok = HandleBlockCacheStat(&block_cache);
  if (!ok) {
    return false;
  }
  *value = static_cast<uint64_t>(block_cache->GetUsage());
  return true;
}

bool InternalStats::HandleBlockCachePinnedUsage(uint64_t* value, DBImpl* /*db*/,
                                                Version* /*version*/) {
  Cache* block_cache;
  bool ok = HandleBlockCacheStat(&block_cache);
  if (!ok) {
    return false;
  }
  *value = static_cast<uint64_t>(block_cache->GetPinnedUsage());
  return true;
}

void InternalStats::DumpDBStats(std::string* value) {
  char buf[1000];
  // DB-level stats, only available from default column family
  double seconds_up = (env_->NowMicros() - started_at_ + 1) / kMicrosInSec;
  double interval_seconds_up = seconds_up - db_stats_snapshot_.seconds_up;
  snprintf(buf, sizeof(buf),
           "\n** DB Stats **\nUptime(secs): %.1f total, %.1f interval\n",
           seconds_up, interval_seconds_up);
  value->append(buf);
  // Cumulative
  uint64_t user_bytes_written =
      GetDBStats(InternalStats::kIntStatsBytesWritten);
  uint64_t num_keys_written =
      GetDBStats(InternalStats::kIntStatsNumKeysWritten);
  uint64_t write_other = GetDBStats(InternalStats::kIntStatsWriteDoneByOther);
  uint64_t write_self = GetDBStats(InternalStats::kIntStatsWriteDoneBySelf);
  uint64_t wal_bytes = GetDBStats(InternalStats::kIntStatsWalFileBytes);
  uint64_t wal_synced = GetDBStats(InternalStats::kIntStatsWalFileSynced);
  uint64_t write_with_wal = GetDBStats(InternalStats::kIntStatsWriteWithWal);
  uint64_t write_stall_micros =
      GetDBStats(InternalStats::kIntStatsWriteStallMicros);
  const int kHumanMicrosLen = 32;
  char human_micros[kHumanMicrosLen];
  // Data
  // writes: total number of write requests.
  // keys: total number of key updates issued by all the write requests.
  // commit groups: number of group commits issued to the DB. Each group can
  // contain one or more writes.
  // So keys / writes is the average number of puts per write request (e.g. in
  // a multi-put), and writes / commit groups is the average group commit size.
  //
  // The format is the same for interval stats.
  snprintf(buf, sizeof(buf),
           "Cumulative writes: %s writes, %s keys, %s commit groups, "
           "%.1f writes per commit group, ingest: %.2f GB, %.2f MB/s\n",
           NumberToHumanString(write_other + write_self).c_str(),
           NumberToHumanString(num_keys_written).c_str(),
           NumberToHumanString(write_self).c_str(),
           (write_other + write_self) / static_cast<double>(write_self + 1),
           user_bytes_written / kGB, user_bytes_written / kMB / seconds_up);
  value->append(buf);
  // WAL
  snprintf(buf, sizeof(buf),
           "Cumulative WAL: %s writes, %s syncs, "
           "%.2f writes per sync, written: %.2f GB, %.2f MB/s\n",
           NumberToHumanString(write_with_wal).c_str(),
           NumberToHumanString(wal_synced).c_str(),
           write_with_wal / static_cast<double>(wal_synced + 1),
           wal_bytes / kGB, wal_bytes / kMB / seconds_up);
  value->append(buf);
  // Stall
  AppendHumanMicros(write_stall_micros, human_micros, kHumanMicrosLen, true);
  snprintf(buf, sizeof(buf), "Cumulative stall: %s, %.1f percent\n",
           human_micros,
           // 10000 = divide by 1M to get secs, then multiply by 100 for pct
           write_stall_micros / 10000.0 / std::max(seconds_up, 0.001));
  value->append(buf);
  // Interval
  uint64_t interval_write_other = write_other - db_stats_snapshot_.write_other;
  uint64_t interval_write_self = write_self - db_stats_snapshot_.write_self;
  uint64_t interval_num_keys_written =
      num_keys_written - db_stats_snapshot_.num_keys_written;
  snprintf(
      buf, sizeof(buf),
      "Interval writes: %s writes, %s keys, %s commit groups, "
      "%.1f writes per commit group, ingest: %.2f MB, %.2f MB/s\n",
      NumberToHumanString(interval_write_other + interval_write_self).c_str(),
      NumberToHumanString(interval_num_keys_written).c_str(),
      NumberToHumanString(interval_write_self).c_str(),
      static_cast<double>(interval_write_other + interval_write_self) /
          (interval_write_self + 1),
      (user_bytes_written - db_stats_snapshot_.ingest_bytes) / kMB,
      (user_bytes_written - db_stats_snapshot_.ingest_bytes) / kMB /
          std::max(interval_seconds_up, 0.001));
  value->append(buf);
  uint64_t interval_write_with_wal =
      write_with_wal - db_stats_snapshot_.write_with_wal;
  uint64_t interval_wal_synced = wal_synced - db_stats_snapshot_.wal_synced;
  uint64_t interval_wal_bytes = wal_bytes - db_stats_snapshot_.wal_bytes;
  snprintf(
      buf, sizeof(buf),
      "Interval WAL: %s writes, %s syncs, "
      "%.2f writes per sync, written: %.2f MB, %.2f MB/s\n",
      NumberToHumanString(interval_write_with_wal).c_str(),
      NumberToHumanString(interval_wal_synced).c_str(),
      interval_write_with_wal / static_cast<double>(interval_wal_synced + 1),
      interval_wal_bytes / kMB,
      interval_wal_bytes / kMB / std::max(interval_seconds_up, 0.001));
  value->append(buf);
  // Stall
  AppendHumanMicros(write_stall_micros - db_stats_snapshot_.write_stall_micros,
                    human_micros, kHumanMicrosLen, true);
  snprintf(buf, sizeof(buf), "Interval stall: %s, %.1f percent\n", human_micros,
           // 10000 = divide by 1M to get secs, then multiply by 100 for pct
           (write_stall_micros - db_stats_snapshot_.write_stall_micros) /
               10000.0 / std::max(interval_seconds_up, 0.001));
  value->append(buf);
  db_stats_snapshot_.seconds_up = seconds_up;
  db_stats_snapshot_.ingest_bytes = user_bytes_written;
  db_stats_snapshot_.write_other = write_other;
  db_stats_snapshot_.write_self = write_self;
  db_stats_snapshot_.num_keys_written = num_keys_written;
  db_stats_snapshot_.wal_bytes = wal_bytes;
  db_stats_snapshot_.wal_synced = wal_synced;
  db_stats_snapshot_.write_with_wal = write_with_wal;
  db_stats_snapshot_.write_stall_micros = write_stall_micros;
}
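
// Illustrative shape of the text appended above (values are made up and long
// lines are wrapped here only for comment width):
//
//   ** DB Stats **
//   Uptime(secs): 120.0 total, 60.0 interval
//   Cumulative writes: 10K writes, 25K keys, 8K commit groups,
//       1.2 writes per commit group, ingest: 0.50 GB, 4.27 MB/s
//   Cumulative WAL: 10K writes, 5K syncs, 2.00 writes per sync, ...
//   Cumulative stall: <human-readable duration>, 0.0 percent
//   ...followed by the matching "Interval ..." lines.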

/**
 * Dump compaction level stats to a map from stat name (with a "compaction."
 * prefix) to the stat value, a double encoded as a string. The level in the
 * stat name is represented by a prefix "Lx", where "x" is the level number;
 * the special level "Sum" holds the sum of each stat across all levels.
 * The result also contains IO stall counters, whose keys start with
 * "io_stalls." and whose values are uint64 counts encoded as strings.
 */
void InternalStats::DumpCFMapStats(
    std::map<std::string, std::string>* cf_stats) {
  CompactionStats compaction_stats_sum;
  std::map<int, std::map<LevelStatType, double>> levels_stats;
  DumpCFMapStats(&levels_stats, &compaction_stats_sum);
  for (auto const& level_ent : levels_stats) {
    auto level_str =
        level_ent.first == -1 ? "Sum" : "L" + ToString(level_ent.first);
    for (auto const& stat_ent : level_ent.second) {
      auto stat_type = stat_ent.first;
      auto key_str =
          "compaction." + level_str + "." +
          InternalStats::compaction_level_stats.at(stat_type).property_name;
      (*cf_stats)[key_str] = std::to_string(stat_ent.second);
    }
  }
  DumpCFMapStatsIOStalls(cf_stats);
}
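
// Illustrative keys emitted by the function above (values are stringified
// numbers): "compaction.L0.WriteGB", "compaction.Sum.CompCount",
// "io_stalls.memtable_slowdown", "io_stalls.total_stop". The per-level keys
// combine the "Lx"/"Sum" prefix with the property_name strings registered in
// compaction_level_stats at the top of this file.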

void InternalStats::DumpCFMapStats(
    std::map<int, std::map<LevelStatType, double>>* levels_stats,
    CompactionStats* compaction_stats_sum) {
  const VersionStorageInfo* vstorage = cfd_->current()->storage_info();
  int num_levels_to_check =
      (cfd_->ioptions()->compaction_style != kCompactionStyleFIFO)
          ? vstorage->num_levels() - 1
          : 1;
  // Compaction scores are sorted by value. Restore them to level order.
  1027. std::vector<double> compaction_score(number_levels_, 0);
  1028. for (int i = 0; i < num_levels_to_check; ++i) {
  1029. compaction_score[vstorage->CompactionScoreLevel(i)] =
  1030. vstorage->CompactionScore(i);
  1031. }
  1032. // Count # of files being compacted for each level
  1033. std::vector<int> files_being_compacted(number_levels_, 0);
  1034. for (int level = 0; level < number_levels_; ++level) {
  1035. for (auto* f : vstorage->LevelFiles(level)) {
  1036. if (f->being_compacted) {
  1037. ++files_being_compacted[level];
  1038. }
  1039. }
  1040. }
  1041. int total_files = 0;
  1042. int total_files_being_compacted = 0;
  1043. double total_file_size = 0;
  1044. uint64_t flush_ingest = cf_stats_value_[BYTES_FLUSHED];
  1045. uint64_t add_file_ingest = cf_stats_value_[BYTES_INGESTED_ADD_FILE];
  1046. uint64_t curr_ingest = flush_ingest + add_file_ingest;
  1047. for (int level = 0; level < number_levels_; level++) {
  1048. int files = vstorage->NumLevelFiles(level);
  1049. total_files += files;
  1050. total_files_being_compacted += files_being_compacted[level];
  1051. if (comp_stats_[level].micros > 0 || files > 0) {
  1052. compaction_stats_sum->Add(comp_stats_[level]);
  1053. total_file_size += vstorage->NumLevelBytes(level);
  1054. uint64_t input_bytes;
  1055. if (level == 0) {
  1056. input_bytes = curr_ingest;
  1057. } else {
  1058. input_bytes = comp_stats_[level].bytes_read_non_output_levels;
  1059. }
  1060. double w_amp =
  1061. (input_bytes == 0)
  1062. ? 0.0
  1063. : static_cast<double>(comp_stats_[level].bytes_written) /
  1064. input_bytes;
  1065. std::map<LevelStatType, double> level_stats;
  1066. PrepareLevelStats(&level_stats, files, files_being_compacted[level],
  1067. static_cast<double>(vstorage->NumLevelBytes(level)),
  1068. compaction_score[level], w_amp, comp_stats_[level]);
  1069. (*levels_stats)[level] = level_stats;
  1070. }
  1071. }
  1072. // Cumulative summary
  1073. double w_amp = compaction_stats_sum->bytes_written /
  1074. static_cast<double>(curr_ingest + 1);
  1075. // Stats summary across levels
  1076. std::map<LevelStatType, double> sum_stats;
  1077. PrepareLevelStats(&sum_stats, total_files, total_files_being_compacted,
  1078. total_file_size, 0, w_amp, *compaction_stats_sum);
  1079. (*levels_stats)[-1] = sum_stats; // -1 is for the Sum level
  1080. }
  1081. void InternalStats::DumpCFMapStatsByPriority(
  1082. std::map<int, std::map<LevelStatType, double>>* priorities_stats) {
  1083. for (size_t priority = 0; priority < comp_stats_by_pri_.size(); priority++) {
  1084. if (comp_stats_by_pri_[priority].micros > 0) {
  1085. std::map<LevelStatType, double> priority_stats;
  1086. PrepareLevelStats(&priority_stats, 0 /* num_files */,
  1087. 0 /* being_compacted */, 0 /* total_file_size */,
  1088. 0 /* compaction_score */, 0 /* w_amp */,
  1089. comp_stats_by_pri_[priority]);
  1090. (*priorities_stats)[static_cast<int>(priority)] = priority_stats;
  1091. }
  1092. }
  1093. }
void InternalStats::DumpCFMapStatsIOStalls(
    std::map<std::string, std::string>* cf_stats) {
  (*cf_stats)["io_stalls.level0_slowdown"] =
      std::to_string(cf_stats_count_[L0_FILE_COUNT_LIMIT_SLOWDOWNS]);
  (*cf_stats)["io_stalls.level0_slowdown_with_compaction"] =
      std::to_string(cf_stats_count_[LOCKED_L0_FILE_COUNT_LIMIT_SLOWDOWNS]);
  (*cf_stats)["io_stalls.level0_numfiles"] =
      std::to_string(cf_stats_count_[L0_FILE_COUNT_LIMIT_STOPS]);
  (*cf_stats)["io_stalls.level0_numfiles_with_compaction"] =
      std::to_string(cf_stats_count_[LOCKED_L0_FILE_COUNT_LIMIT_STOPS]);
  (*cf_stats)["io_stalls.stop_for_pending_compaction_bytes"] =
      std::to_string(cf_stats_count_[PENDING_COMPACTION_BYTES_LIMIT_STOPS]);
  (*cf_stats)["io_stalls.slowdown_for_pending_compaction_bytes"] =
      std::to_string(cf_stats_count_[PENDING_COMPACTION_BYTES_LIMIT_SLOWDOWNS]);
  (*cf_stats)["io_stalls.memtable_compaction"] =
      std::to_string(cf_stats_count_[MEMTABLE_LIMIT_STOPS]);
  (*cf_stats)["io_stalls.memtable_slowdown"] =
      std::to_string(cf_stats_count_[MEMTABLE_LIMIT_SLOWDOWNS]);

  uint64_t total_stop = cf_stats_count_[L0_FILE_COUNT_LIMIT_STOPS] +
                        cf_stats_count_[PENDING_COMPACTION_BYTES_LIMIT_STOPS] +
                        cf_stats_count_[MEMTABLE_LIMIT_STOPS];

  uint64_t total_slowdown =
      cf_stats_count_[L0_FILE_COUNT_LIMIT_SLOWDOWNS] +
      cf_stats_count_[PENDING_COMPACTION_BYTES_LIMIT_SLOWDOWNS] +
      cf_stats_count_[MEMTABLE_LIMIT_SLOWDOWNS];

  (*cf_stats)["io_stalls.total_stop"] = std::to_string(total_stop);
  (*cf_stats)["io_stalls.total_slowdown"] = std::to_string(total_slowdown);
}
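
// Builds the full human-readable per-column-family stats dump: the per-level
// and per-priority tables plus summary lines, followed by the file read
// latency histograms.
//
// Illustrative usage only (not code from this file): callers typically reach
// this output through the public property API, roughly along these lines:
//
//   std::string stats;
//   db->GetProperty(cf_handle, "rocksdb.cfstats", &stats);
//
// The property-name-to-handler plumbing lives elsewhere in this file.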
void InternalStats::DumpCFStats(std::string* value) {
  DumpCFStatsNoFileHistogram(value);
  DumpCFFileHistogram(value);
}
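
// Appends the per-level stats table, the per-priority table, and the
// cumulative / interval summary lines (uptime, flush, AddFile, compaction,
// stalls) for this column family. "Interval" figures are deltas against
// cf_stats_snapshot_, which is refreshed at the end of this call.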
void InternalStats::DumpCFStatsNoFileHistogram(std::string* value) {
  char buf[2000];
  // Per-ColumnFamily stats
  PrintLevelStatsHeader(buf, sizeof(buf), cfd_->GetName(), "Level");
  value->append(buf);

  // Print stats for each level
  std::map<int, std::map<LevelStatType, double>> levels_stats;
  CompactionStats compaction_stats_sum;
  DumpCFMapStats(&levels_stats, &compaction_stats_sum);
  for (int l = 0; l < number_levels_; ++l) {
    if (levels_stats.find(l) != levels_stats.end()) {
      PrintLevelStats(buf, sizeof(buf), "L" + ToString(l), levels_stats[l]);
      value->append(buf);
    }
  }

  // Print sum of level stats
  PrintLevelStats(buf, sizeof(buf), "Sum", levels_stats[-1]);
  value->append(buf);
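
  // Cumulative ingest counters: bytes written by flushes, and bytes / files /
  // keys brought in through external SST file ingestion (AddFile).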
  uint64_t flush_ingest = cf_stats_value_[BYTES_FLUSHED];
  uint64_t add_file_ingest = cf_stats_value_[BYTES_INGESTED_ADD_FILE];
  uint64_t ingest_files_addfile = cf_stats_value_[INGESTED_NUM_FILES_TOTAL];
  uint64_t ingest_l0_files_addfile =
      cf_stats_value_[INGESTED_LEVEL0_NUM_FILES_TOTAL];
  uint64_t ingest_keys_addfile = cf_stats_value_[INGESTED_NUM_KEYS_TOTAL];

  // Cumulative summary
  uint64_t total_stall_count =
      cf_stats_count_[L0_FILE_COUNT_LIMIT_SLOWDOWNS] +
      cf_stats_count_[L0_FILE_COUNT_LIMIT_STOPS] +
      cf_stats_count_[PENDING_COMPACTION_BYTES_LIMIT_SLOWDOWNS] +
      cf_stats_count_[PENDING_COMPACTION_BYTES_LIMIT_STOPS] +
      cf_stats_count_[MEMTABLE_LIMIT_STOPS] +
      cf_stats_count_[MEMTABLE_LIMIT_SLOWDOWNS];

  // Interval summary
  uint64_t interval_flush_ingest =
      flush_ingest - cf_stats_snapshot_.ingest_bytes_flush;
  uint64_t interval_add_file_ingest =
      add_file_ingest - cf_stats_snapshot_.ingest_bytes_addfile;
  // The +1 keeps the write-amplification division below well-defined when no
  // bytes were ingested during the interval.
  uint64_t interval_ingest =
      interval_flush_ingest + interval_add_file_ingest + 1;
  CompactionStats interval_stats(compaction_stats_sum);
  interval_stats.Subtract(cf_stats_snapshot_.comp_stats);
  double w_amp =
      interval_stats.bytes_written / static_cast<double>(interval_ingest);
  PrintLevelStats(buf, sizeof(buf), "Int", 0, 0, 0, 0, w_amp, interval_stats);
  value->append(buf);
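
  // Break the same compaction work down by thread-pool priority (labelled via
  // Env::PriorityToString), one row per priority that did any work.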
  PrintLevelStatsHeader(buf, sizeof(buf), cfd_->GetName(), "Priority");
  value->append(buf);
  std::map<int, std::map<LevelStatType, double>> priorities_stats;
  DumpCFMapStatsByPriority(&priorities_stats);
  for (size_t priority = 0; priority < comp_stats_by_pri_.size(); ++priority) {
    if (priorities_stats.find(static_cast<int>(priority)) !=
        priorities_stats.end()) {
      PrintLevelStats(
          buf, sizeof(buf),
          Env::PriorityToString(static_cast<Env::Priority>(priority)),
          priorities_stats[static_cast<int>(priority)]);
      value->append(buf);
    }
  }
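
  // Uptime, flush, and AddFile ingestion summary lines, each reported as a
  // cumulative total plus a delta since the last stats snapshot.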
  double seconds_up = (env_->NowMicros() - started_at_ + 1) / kMicrosInSec;
  double interval_seconds_up = seconds_up - cf_stats_snapshot_.seconds_up;
  snprintf(buf, sizeof(buf), "Uptime(secs): %.1f total, %.1f interval\n",
           seconds_up, interval_seconds_up);
  value->append(buf);

  snprintf(buf, sizeof(buf), "Flush(GB): cumulative %.3f, interval %.3f\n",
           flush_ingest / kGB, interval_flush_ingest / kGB);
  value->append(buf);

  snprintf(buf, sizeof(buf), "AddFile(GB): cumulative %.3f, interval %.3f\n",
           add_file_ingest / kGB, interval_add_file_ingest / kGB);
  value->append(buf);

  uint64_t interval_ingest_files_addfile =
      ingest_files_addfile - cf_stats_snapshot_.ingest_files_addfile;
  snprintf(buf, sizeof(buf),
           "AddFile(Total Files): cumulative %" PRIu64 ", interval %" PRIu64
           "\n",
           ingest_files_addfile, interval_ingest_files_addfile);
  value->append(buf);

  uint64_t interval_ingest_l0_files_addfile =
      ingest_l0_files_addfile - cf_stats_snapshot_.ingest_l0_files_addfile;
  snprintf(buf, sizeof(buf),
           "AddFile(L0 Files): cumulative %" PRIu64 ", interval %" PRIu64 "\n",
           ingest_l0_files_addfile, interval_ingest_l0_files_addfile);
  value->append(buf);

  uint64_t interval_ingest_keys_addfile =
      ingest_keys_addfile - cf_stats_snapshot_.ingest_keys_addfile;
  snprintf(buf, sizeof(buf),
           "AddFile(Keys): cumulative %" PRIu64 ", interval %" PRIu64 "\n",
           ingest_keys_addfile, interval_ingest_keys_addfile);
  value->append(buf);

  // Compact
  uint64_t compact_bytes_read = 0;
  uint64_t compact_bytes_write = 0;
  uint64_t compact_micros = 0;
  for (int level = 0; level < number_levels_; level++) {
    compact_bytes_read += comp_stats_[level].bytes_read_output_level +
                          comp_stats_[level].bytes_read_non_output_levels;
    compact_bytes_write += comp_stats_[level].bytes_written;
    compact_micros += comp_stats_[level].micros;
  }

  snprintf(buf, sizeof(buf),
           "Cumulative compaction: %.2f GB write, %.2f MB/s write, "
           "%.2f GB read, %.2f MB/s read, %.1f seconds\n",
           compact_bytes_write / kGB, compact_bytes_write / kMB / seconds_up,
           compact_bytes_read / kGB, compact_bytes_read / kMB / seconds_up,
           compact_micros / kMicrosInSec);
  value->append(buf);

  // Compaction interval
  uint64_t interval_compact_bytes_write =
      compact_bytes_write - cf_stats_snapshot_.compact_bytes_write;
  uint64_t interval_compact_bytes_read =
      compact_bytes_read - cf_stats_snapshot_.compact_bytes_read;
  uint64_t interval_compact_micros =
      compact_micros - cf_stats_snapshot_.compact_micros;

  snprintf(
      buf, sizeof(buf),
      "Interval compaction: %.2f GB write, %.2f MB/s write, "
      "%.2f GB read, %.2f MB/s read, %.1f seconds\n",
      interval_compact_bytes_write / kGB,
      interval_compact_bytes_write / kMB / std::max(interval_seconds_up, 0.001),
      interval_compact_bytes_read / kGB,
      interval_compact_bytes_read / kMB / std::max(interval_seconds_up, 0.001),
      interval_compact_micros / kMicrosInSec);
  value->append(buf);

  cf_stats_snapshot_.compact_bytes_write = compact_bytes_write;
  cf_stats_snapshot_.compact_bytes_read = compact_bytes_read;
  cf_stats_snapshot_.compact_micros = compact_micros;
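
  // Write-stall counters: each stall condition with its cumulative count,
  // followed by the total number of stall events in this interval.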
  snprintf(buf, sizeof(buf),
           "Stalls(count): %" PRIu64
           " level0_slowdown, "
           "%" PRIu64
           " level0_slowdown_with_compaction, "
           "%" PRIu64
           " level0_numfiles, "
           "%" PRIu64
           " level0_numfiles_with_compaction, "
           "%" PRIu64
           " stop for pending_compaction_bytes, "
           "%" PRIu64
           " slowdown for pending_compaction_bytes, "
           "%" PRIu64
           " memtable_compaction, "
           "%" PRIu64
           " memtable_slowdown, "
           "interval %" PRIu64 " total count\n",
           cf_stats_count_[L0_FILE_COUNT_LIMIT_SLOWDOWNS],
           cf_stats_count_[LOCKED_L0_FILE_COUNT_LIMIT_SLOWDOWNS],
           cf_stats_count_[L0_FILE_COUNT_LIMIT_STOPS],
           cf_stats_count_[LOCKED_L0_FILE_COUNT_LIMIT_STOPS],
           cf_stats_count_[PENDING_COMPACTION_BYTES_LIMIT_STOPS],
           cf_stats_count_[PENDING_COMPACTION_BYTES_LIMIT_SLOWDOWNS],
           cf_stats_count_[MEMTABLE_LIMIT_STOPS],
           cf_stats_count_[MEMTABLE_LIMIT_SLOWDOWNS],
           total_stall_count - cf_stats_snapshot_.stall_count);
  value->append(buf);

  cf_stats_snapshot_.seconds_up = seconds_up;
  cf_stats_snapshot_.ingest_bytes_flush = flush_ingest;
  cf_stats_snapshot_.ingest_bytes_addfile = add_file_ingest;
  cf_stats_snapshot_.ingest_files_addfile = ingest_files_addfile;
  cf_stats_snapshot_.ingest_l0_files_addfile = ingest_l0_files_addfile;
  cf_stats_snapshot_.ingest_keys_addfile = ingest_keys_addfile;
  cf_stats_snapshot_.comp_stats = compaction_stats_sum;
  cf_stats_snapshot_.stall_count = total_stall_count;
}
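
// Appends a per-level histogram of SST file read latencies (in microseconds)
// for this column family; levels with no recorded reads are skipped.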
void InternalStats::DumpCFFileHistogram(std::string* value) {
  char buf[2000];
  snprintf(buf, sizeof(buf),
           "\n** File Read Latency Histogram By Level [%s] **\n",
           cfd_->GetName().c_str());
  value->append(buf);
  for (int level = 0; level < number_levels_; level++) {
    if (!file_read_latency_[level].Empty()) {
      char buf2[5000];
      snprintf(buf2, sizeof(buf2),
               "** Level %d read latency histogram (micros):\n%s\n", level,
               file_read_latency_[level].ToString().c_str());
      value->append(buf2);
    }
  }
}
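
// In ROCKSDB_LITE builds the stats machinery above is compiled out, so
// property lookups simply fail to resolve.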
#else

const DBPropertyInfo* GetPropertyInfo(const Slice& /*property*/) {
  return nullptr;
}

#endif  // !ROCKSDB_LITE

}  // namespace ROCKSDB_NAMESPACE