perf_context_test.cc 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180
  1. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
  2. // This source code is licensed under both the GPLv2 (found in the
  3. // COPYING file in the root directory) and Apache 2.0 License
  4. // (found in the LICENSE.Apache file in the root directory).
  5. //
#include "rocksdb/perf_context.h"

#include <algorithm>
#include <iostream>
#include <memory>
#include <thread>
#include <vector>

#include "monitoring/histogram.h"
#include "monitoring/instrumented_mutex.h"
#include "monitoring/perf_context_imp.h"
#include "monitoring/thread_status_util.h"
#include "port/port.h"
#include "rocksdb/db.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/system_clock.h"
#include "test_util/testharness.h"
#include "util/stop_watch.h"
#include "util/string_util.h"
#include "utilities/merge_operators.h"
// Test knobs. These mimic gflags-style flags but are plain globals; this
// file does no command-line parsing, so they are set at build/edit time.
bool FLAGS_random_key = false;  // shuffle keys before insertion (see ProfileQueries)
// NOTE: historical typo ("memetable") kept — renaming would be cosmetic only.
bool FLAGS_use_set_based_memetable = false;  // use HashSkipList memtable rep
int FLAGS_total_keys = 100;  // number of key/value pairs written per test
int FLAGS_write_buffer_size = 1000000000;  // memtable size in bytes
int FLAGS_max_write_buffer_number = 8;
int FLAGS_min_write_buffer_number_to_merge = 7;
bool FLAGS_verbose = false;  // print histograms/counters to stdout
// Path to the database on file system
const std::string kDbName =
    ROCKSDB_NAMESPACE::test::PerThreadDBPath("perf_context_test");
  34. namespace ROCKSDB_NAMESPACE {
  35. std::shared_ptr<DB> OpenDb(bool read_only = false) {
  36. DB* db;
  37. Options options;
  38. options.create_if_missing = true;
  39. options.max_open_files = -1;
  40. options.write_buffer_size = FLAGS_write_buffer_size;
  41. options.max_write_buffer_number = FLAGS_max_write_buffer_number;
  42. options.min_write_buffer_number_to_merge =
  43. FLAGS_min_write_buffer_number_to_merge;
  44. if (FLAGS_use_set_based_memetable) {
  45. options.prefix_extractor.reset(
  46. ROCKSDB_NAMESPACE::NewFixedPrefixTransform(0));
  47. options.memtable_factory.reset(NewHashSkipListRepFactory());
  48. }
  49. Status s;
  50. if (!read_only) {
  51. s = DB::Open(options, kDbName, &db);
  52. } else {
  53. s = DB::OpenForReadOnly(options, kDbName, &db);
  54. }
  55. EXPECT_OK(s);
  56. return std::shared_ptr<DB>(db);
  57. }
  58. class PerfContextTest : public testing::Test {};
// Measures user-key comparison counts and latency for Get/Seek/Next when the
// key range is dominated by deletion tombstones (all keys but the last are
// deleted). The Reset()/timer/operation ordering is deliberate: each
// measurement covers exactly one DB call.
TEST_F(PerfContextTest, SeekIntoDeletion) {
  ASSERT_OK(DestroyDB(kDbName, Options()));
  auto db = OpenDb();
  WriteOptions write_options;
  ReadOptions read_options;

  // Write k0..k{N-1}, then delete all but the last key.
  for (int i = 0; i < FLAGS_total_keys; ++i) {
    std::string key = "k" + std::to_string(i);
    std::string value = "v" + std::to_string(i);
    ASSERT_OK(db->Put(write_options, key, value));
  }
  for (int i = 0; i < FLAGS_total_keys - 1; ++i) {
    std::string key = "k" + std::to_string(i);
    ASSERT_OK(db->Delete(write_options, key));
  }

  // Point lookups on deleted keys must return NotFound; record the
  // comparison count and wall-clock time of each Get.
  HistogramImpl hist_get;
  HistogramImpl hist_get_time;
  for (int i = 0; i < FLAGS_total_keys - 1; ++i) {
    std::string key = "k" + std::to_string(i);
    std::string value;
    get_perf_context()->Reset();
    StopWatchNano timer(SystemClock::Default().get());
    timer.Start();
    auto status = db->Get(read_options, key, &value);
    auto elapsed_nanos = timer.ElapsedNanos();
    ASSERT_TRUE(status.IsNotFound());
    hist_get.Add(get_perf_context()->user_key_comparison_count);
    hist_get_time.Add(elapsed_nanos);
  }
  if (FLAGS_verbose) {
    std::cout << "Get user key comparison: \n"
              << hist_get.ToString() << "Get time: \n"
              << hist_get_time.ToString();
  }

  // SeekToFirst must skip every tombstone before reaching the one live key.
  {
    HistogramImpl hist_seek_to_first;
    std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
    get_perf_context()->Reset();
    StopWatchNano timer(SystemClock::Default().get(), true);
    iter->SeekToFirst();
    hist_seek_to_first.Add(get_perf_context()->user_key_comparison_count);
    auto elapsed_nanos = timer.ElapsedNanos();
    if (FLAGS_verbose) {
      std::cout << "SeekToFirst user key comparison: \n"
                << hist_seek_to_first.ToString() << "ikey skipped: "
                << get_perf_context()->internal_key_skipped_count << "\n"
                << "idelete skipped: "
                << get_perf_context()->internal_delete_skipped_count << "\n"
                << "elapsed: " << elapsed_nanos << "\n";
    }
  }

  // Seek to each (deleted) key — the iterator lands on the last, live key —
  // then measure one Next() from there.
  HistogramImpl hist_seek;
  for (int i = 0; i < FLAGS_total_keys; ++i) {
    std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
    std::string key = "k" + std::to_string(i);
    get_perf_context()->Reset();
    StopWatchNano timer(SystemClock::Default().get(), true);
    iter->Seek(key);
    auto elapsed_nanos = timer.ElapsedNanos();
    hist_seek.Add(get_perf_context()->user_key_comparison_count);
    if (FLAGS_verbose) {
      std::cout << "seek cmp: " << get_perf_context()->user_key_comparison_count
                << " ikey skipped "
                << get_perf_context()->internal_key_skipped_count
                << " idelete skipped "
                << get_perf_context()->internal_delete_skipped_count
                << " elapsed: " << elapsed_nanos << "ns\n";
    }
    get_perf_context()->Reset();
    ASSERT_TRUE(iter->Valid());  // the last key is live, so Seek always lands
    StopWatchNano timer2(SystemClock::Default().get(), true);
    iter->Next();
    ASSERT_OK(iter->status());
    auto elapsed_nanos2 = timer2.ElapsedNanos();
    if (FLAGS_verbose) {
      std::cout << "next cmp: " << get_perf_context()->user_key_comparison_count
                << "elapsed: " << elapsed_nanos2 << "ns\n";
    }
  }

  if (FLAGS_verbose) {
    std::cout << "Seek user key comparison: \n" << hist_seek.ToString();
  }
}
  141. TEST_F(PerfContextTest, StopWatchNanoOverhead) {
  142. // profile the timer cost by itself!
  143. const int kTotalIterations = 1000000;
  144. std::vector<uint64_t> timings(kTotalIterations);
  145. StopWatchNano timer(SystemClock::Default().get(), true);
  146. for (auto& timing : timings) {
  147. timing = timer.ElapsedNanos(true /* reset */);
  148. }
  149. HistogramImpl histogram;
  150. for (const auto timing : timings) {
  151. histogram.Add(timing);
  152. }
  153. if (FLAGS_verbose) {
  154. std::cout << histogram.ToString();
  155. }
  156. }
  157. TEST_F(PerfContextTest, StopWatchOverhead) {
  158. // profile the timer cost by itself!
  159. const int kTotalIterations = 1000000;
  160. uint64_t elapsed = 0;
  161. std::vector<uint64_t> timings(kTotalIterations);
  162. StopWatch timer(SystemClock::Default().get(), nullptr, 0,
  163. Histograms::HISTOGRAM_ENUM_MAX, &elapsed);
  164. for (auto& timing : timings) {
  165. timing = elapsed;
  166. }
  167. HistogramImpl histogram;
  168. uint64_t prev_timing = 0;
  169. for (const auto timing : timings) {
  170. histogram.Add(timing - prev_timing);
  171. prev_timing = timing;
  172. }
  173. if (FLAGS_verbose) {
  174. std::cout << histogram.ToString();
  175. }
  176. }
  177. void ProfileQueries(bool enabled_time = false) {
  178. ASSERT_OK(DestroyDB(kDbName, Options())); // Start this test with a fresh DB
  179. auto db = OpenDb();
  180. WriteOptions write_options;
  181. ReadOptions read_options;
  182. HistogramImpl hist_put;
  183. HistogramImpl hist_get;
  184. HistogramImpl hist_get_snapshot;
  185. HistogramImpl hist_get_memtable;
  186. HistogramImpl hist_get_files;
  187. HistogramImpl hist_get_post_process;
  188. HistogramImpl hist_num_memtable_checked;
  189. HistogramImpl hist_mget;
  190. HistogramImpl hist_mget_snapshot;
  191. HistogramImpl hist_mget_memtable;
  192. HistogramImpl hist_mget_files;
  193. HistogramImpl hist_mget_post_process;
  194. HistogramImpl hist_mget_num_memtable_checked;
  195. HistogramImpl hist_write_pre_post;
  196. HistogramImpl hist_write_wal_time;
  197. HistogramImpl hist_write_memtable_time;
  198. HistogramImpl hist_write_delay_time;
  199. HistogramImpl hist_write_thread_wait_nanos;
  200. HistogramImpl hist_write_scheduling_time;
  201. uint64_t total_db_mutex_nanos = 0;
  202. if (FLAGS_verbose) {
  203. std::cout << "Inserting " << FLAGS_total_keys << " key/value pairs\n...\n";
  204. }
  205. std::vector<int> keys;
  206. const int kFlushFlag = -1;
  207. for (int i = 0; i < FLAGS_total_keys; ++i) {
  208. keys.push_back(i);
  209. if (i == FLAGS_total_keys / 2) {
  210. // Issuing a flush in the middle.
  211. keys.push_back(kFlushFlag);
  212. }
  213. }
  214. if (FLAGS_random_key) {
  215. RandomShuffle(std::begin(keys), std::end(keys));
  216. }
  217. #ifndef NDEBUG
  218. ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, 1U);
  219. #endif
  220. int num_mutex_waited = 0;
  221. for (const int i : keys) {
  222. if (i == kFlushFlag) {
  223. FlushOptions fo;
  224. ASSERT_OK(db->Flush(fo));
  225. continue;
  226. }
  227. std::string key = "k" + std::to_string(i);
  228. std::string value = "v" + std::to_string(i);
  229. std::vector<std::string> values;
  230. get_perf_context()->Reset();
  231. ASSERT_OK(db->Put(write_options, key, value));
  232. if (++num_mutex_waited > 3) {
  233. #ifndef NDEBUG
  234. ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, 0U);
  235. #endif
  236. }
  237. hist_write_pre_post.Add(
  238. get_perf_context()->write_pre_and_post_process_time);
  239. hist_write_wal_time.Add(get_perf_context()->write_wal_time);
  240. hist_write_memtable_time.Add(get_perf_context()->write_memtable_time);
  241. hist_write_delay_time.Add(get_perf_context()->write_delay_time);
  242. hist_write_thread_wait_nanos.Add(
  243. get_perf_context()->write_thread_wait_nanos);
  244. hist_write_scheduling_time.Add(
  245. get_perf_context()->write_scheduling_flushes_compactions_time);
  246. hist_put.Add(get_perf_context()->user_key_comparison_count);
  247. total_db_mutex_nanos += get_perf_context()->db_mutex_lock_nanos;
  248. }
  249. #ifndef NDEBUG
  250. ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, 0U);
  251. #endif
  252. for (const int i : keys) {
  253. if (i == kFlushFlag) {
  254. continue;
  255. }
  256. std::string key = "k" + std::to_string(i);
  257. std::string expected_value = "v" + std::to_string(i);
  258. std::string value;
  259. std::vector<Slice> multiget_keys = {Slice(key)};
  260. std::vector<std::string> values;
  261. get_perf_context()->Reset();
  262. ASSERT_OK(db->Get(read_options, key, &value));
  263. ASSERT_EQ(expected_value, value);
  264. hist_get_snapshot.Add(get_perf_context()->get_snapshot_time);
  265. hist_get_memtable.Add(get_perf_context()->get_from_memtable_time);
  266. hist_get_files.Add(get_perf_context()->get_from_output_files_time);
  267. hist_num_memtable_checked.Add(get_perf_context()->get_from_memtable_count);
  268. hist_get_post_process.Add(get_perf_context()->get_post_process_time);
  269. hist_get.Add(get_perf_context()->user_key_comparison_count);
  270. get_perf_context()->Reset();
  271. auto statuses = db->MultiGet(read_options, multiget_keys, &values);
  272. for (const auto& s : statuses) {
  273. ASSERT_OK(s);
  274. }
  275. hist_mget_snapshot.Add(get_perf_context()->get_snapshot_time);
  276. hist_mget_memtable.Add(get_perf_context()->get_from_memtable_time);
  277. hist_mget_files.Add(get_perf_context()->get_from_output_files_time);
  278. hist_mget_num_memtable_checked.Add(
  279. get_perf_context()->get_from_memtable_count);
  280. hist_mget_post_process.Add(get_perf_context()->get_post_process_time);
  281. hist_mget.Add(get_perf_context()->user_key_comparison_count);
  282. }
  283. if (FLAGS_verbose) {
  284. std::cout << "Put user key comparison: \n"
  285. << hist_put.ToString() << "Get user key comparison: \n"
  286. << hist_get.ToString() << "MultiGet user key comparison: \n"
  287. << hist_get.ToString();
  288. std::cout << "Put(): Pre and Post Process Time: \n"
  289. << hist_write_pre_post.ToString() << " Writing WAL time: \n"
  290. << hist_write_wal_time.ToString() << "\n"
  291. << " Writing Mem Table time: \n"
  292. << hist_write_memtable_time.ToString() << "\n"
  293. << " Write Delay: \n"
  294. << hist_write_delay_time.ToString() << "\n"
  295. << " Waiting for Batch time: \n"
  296. << hist_write_thread_wait_nanos.ToString() << "\n"
  297. << " Scheduling Flushes and Compactions Time: \n"
  298. << hist_write_scheduling_time.ToString() << "\n"
  299. << " Total DB mutex nanos: \n"
  300. << total_db_mutex_nanos << "\n";
  301. std::cout << "Get(): Time to get snapshot: \n"
  302. << hist_get_snapshot.ToString()
  303. << " Time to get value from memtables: \n"
  304. << hist_get_memtable.ToString() << "\n"
  305. << " Time to get value from output files: \n"
  306. << hist_get_files.ToString() << "\n"
  307. << " Number of memtables checked: \n"
  308. << hist_num_memtable_checked.ToString() << "\n"
  309. << " Time to post process: \n"
  310. << hist_get_post_process.ToString() << "\n";
  311. std::cout << "MultiGet(): Time to get snapshot: \n"
  312. << hist_mget_snapshot.ToString()
  313. << " Time to get value from memtables: \n"
  314. << hist_mget_memtable.ToString() << "\n"
  315. << " Time to get value from output files: \n"
  316. << hist_mget_files.ToString() << "\n"
  317. << " Number of memtables checked: \n"
  318. << hist_mget_num_memtable_checked.ToString() << "\n"
  319. << " Time to post process: \n"
  320. << hist_mget_post_process.ToString() << "\n";
  321. }
  322. if (enabled_time) {
  323. ASSERT_GT(hist_get.Average(), 0);
  324. ASSERT_GT(hist_get_snapshot.Average(), 0);
  325. ASSERT_GT(hist_get_memtable.Average(), 0);
  326. ASSERT_GT(hist_get_files.Average(), 0);
  327. ASSERT_GT(hist_get_post_process.Average(), 0);
  328. ASSERT_GT(hist_num_memtable_checked.Average(), 0);
  329. ASSERT_GT(hist_mget.Average(), 0);
  330. ASSERT_GT(hist_mget_snapshot.Average(), 0);
  331. ASSERT_GT(hist_mget_memtable.Average(), 0);
  332. ASSERT_GT(hist_mget_files.Average(), 0);
  333. ASSERT_GT(hist_mget_post_process.Average(), 0);
  334. ASSERT_GT(hist_mget_num_memtable_checked.Average(), 0);
  335. EXPECT_GT(hist_write_pre_post.Average(), 0);
  336. EXPECT_GT(hist_write_wal_time.Average(), 0);
  337. EXPECT_GT(hist_write_memtable_time.Average(), 0);
  338. EXPECT_EQ(hist_write_delay_time.Average(), 0);
  339. EXPECT_EQ(hist_write_thread_wait_nanos.Average(), 0);
  340. EXPECT_GT(hist_write_scheduling_time.Average(), 0);
  341. #ifndef NDEBUG
  342. ASSERT_LT(total_db_mutex_nanos, 100U);
  343. #endif
  344. }
  345. db.reset();
  346. db = OpenDb(true);
  347. hist_get.Clear();
  348. hist_get_snapshot.Clear();
  349. hist_get_memtable.Clear();
  350. hist_get_files.Clear();
  351. hist_get_post_process.Clear();
  352. hist_num_memtable_checked.Clear();
  353. hist_mget.Clear();
  354. hist_mget_snapshot.Clear();
  355. hist_mget_memtable.Clear();
  356. hist_mget_files.Clear();
  357. hist_mget_post_process.Clear();
  358. hist_mget_num_memtable_checked.Clear();
  359. for (const int i : keys) {
  360. if (i == kFlushFlag) {
  361. continue;
  362. }
  363. std::string key = "k" + std::to_string(i);
  364. std::string expected_value = "v" + std::to_string(i);
  365. std::string value;
  366. std::vector<Slice> multiget_keys = {Slice(key)};
  367. std::vector<std::string> values;
  368. get_perf_context()->Reset();
  369. ASSERT_OK(db->Get(read_options, key, &value));
  370. ASSERT_EQ(expected_value, value);
  371. hist_get_snapshot.Add(get_perf_context()->get_snapshot_time);
  372. hist_get_memtable.Add(get_perf_context()->get_from_memtable_time);
  373. hist_get_files.Add(get_perf_context()->get_from_output_files_time);
  374. hist_num_memtable_checked.Add(get_perf_context()->get_from_memtable_count);
  375. hist_get_post_process.Add(get_perf_context()->get_post_process_time);
  376. hist_get.Add(get_perf_context()->user_key_comparison_count);
  377. get_perf_context()->Reset();
  378. auto statuses = db->MultiGet(read_options, multiget_keys, &values);
  379. for (const auto& s : statuses) {
  380. ASSERT_OK(s);
  381. }
  382. hist_mget_snapshot.Add(get_perf_context()->get_snapshot_time);
  383. hist_mget_memtable.Add(get_perf_context()->get_from_memtable_time);
  384. hist_mget_files.Add(get_perf_context()->get_from_output_files_time);
  385. hist_mget_num_memtable_checked.Add(
  386. get_perf_context()->get_from_memtable_count);
  387. hist_mget_post_process.Add(get_perf_context()->get_post_process_time);
  388. hist_mget.Add(get_perf_context()->user_key_comparison_count);
  389. }
  390. if (FLAGS_verbose) {
  391. std::cout << "ReadOnly Get user key comparison: \n"
  392. << hist_get.ToString()
  393. << "ReadOnly MultiGet user key comparison: \n"
  394. << hist_mget.ToString();
  395. std::cout << "ReadOnly Get(): Time to get snapshot: \n"
  396. << hist_get_snapshot.ToString()
  397. << " Time to get value from memtables: \n"
  398. << hist_get_memtable.ToString() << "\n"
  399. << " Time to get value from output files: \n"
  400. << hist_get_files.ToString() << "\n"
  401. << " Number of memtables checked: \n"
  402. << hist_num_memtable_checked.ToString() << "\n"
  403. << " Time to post process: \n"
  404. << hist_get_post_process.ToString() << "\n";
  405. std::cout << "ReadOnly MultiGet(): Time to get snapshot: \n"
  406. << hist_mget_snapshot.ToString()
  407. << " Time to get value from memtables: \n"
  408. << hist_mget_memtable.ToString() << "\n"
  409. << " Time to get value from output files: \n"
  410. << hist_mget_files.ToString() << "\n"
  411. << " Number of memtables checked: \n"
  412. << hist_mget_num_memtable_checked.ToString() << "\n"
  413. << " Time to post process: \n"
  414. << hist_mget_post_process.ToString() << "\n";
  415. }
  416. if (enabled_time) {
  417. ASSERT_GT(hist_get.Average(), 0);
  418. ASSERT_GT(hist_get_memtable.Average(), 0);
  419. ASSERT_GT(hist_get_files.Average(), 0);
  420. ASSERT_GT(hist_num_memtable_checked.Average(), 0);
  421. // In read-only mode Get(), no super version operation is needed
  422. ASSERT_EQ(hist_get_post_process.Average(), 0);
  423. ASSERT_GT(hist_get_snapshot.Average(), 0);
  424. ASSERT_GT(hist_mget.Average(), 0);
  425. ASSERT_GT(hist_mget_snapshot.Average(), 0);
  426. ASSERT_GT(hist_mget_memtable.Average(), 0);
  427. ASSERT_GT(hist_mget_files.Average(), 0);
  428. ASSERT_GT(hist_mget_post_process.Average(), 0);
  429. ASSERT_GT(hist_mget_num_memtable_checked.Average(), 0);
  430. }
  431. }
  432. TEST_F(PerfContextTest, KeyComparisonCount) {
  433. SetPerfLevel(kEnableCount);
  434. ProfileQueries();
  435. SetPerfLevel(kDisable);
  436. ProfileQueries();
  437. SetPerfLevel(kEnableTime);
  438. ProfileQueries(true);
  439. }
  440. // make perf_context_test
  441. // export ROCKSDB_TESTS=PerfContextTest.SeekKeyComparison
  442. // For one memtable:
  443. // ./perf_context_test --write_buffer_size=500000 --total_keys=10000
  444. // For two memtables:
  445. // ./perf_context_test --write_buffer_size=250000 --total_keys=10000
  446. // Specify --random_key=1 to shuffle the key before insertion
  447. // Results show that, for sequential insertion, worst-case Seek Key comparison
  448. // is close to the total number of keys (linear), when there is only one
  449. // memtable. When there are two memtables, even the avg Seek Key comparison
  450. // starts to become linear to the input size.
TEST_F(PerfContextTest, SeekKeyComparison) {
  // Measures Put latency vs. WAL-write time, then the user-key comparison
  // counts of Seek (fresh iterator per key) and Next (one full scan).
  ASSERT_OK(DestroyDB(kDbName, Options()));
  auto db = OpenDb();
  WriteOptions write_options;
  ReadOptions read_options;

  if (FLAGS_verbose) {
    std::cout << "Inserting " << FLAGS_total_keys << " key/value pairs\n...\n";
  }

  std::vector<int> keys;
  for (int i = 0; i < FLAGS_total_keys; ++i) {
    keys.push_back(i);
  }
  if (FLAGS_random_key) {
    RandomShuffle(std::begin(keys), std::end(keys));
  }

  HistogramImpl hist_put_time;
  HistogramImpl hist_wal_time;
  HistogramImpl hist_time_diff;

  SetPerfLevel(kEnableTime);
  StopWatchNano timer(SystemClock::Default().get());
  for (const int i : keys) {
    std::string key = "k" + std::to_string(i);
    std::string value = "v" + std::to_string(i);

    get_perf_context()->Reset();
    timer.Start();
    ASSERT_OK(db->Put(write_options, key, value));
    auto put_time = timer.ElapsedNanos();
    hist_put_time.Add(put_time);
    hist_wal_time.Add(get_perf_context()->write_wal_time);
    // Everything in Put that is not the WAL write.
    hist_time_diff.Add(put_time - get_perf_context()->write_wal_time);
  }

  if (FLAGS_verbose) {
    std::cout << "Put time:\n"
              << hist_put_time.ToString() << "WAL time:\n"
              << hist_wal_time.ToString() << "time diff:\n"
              << hist_time_diff.ToString();
  }

  HistogramImpl hist_seek;
  HistogramImpl hist_next;

  // One fresh iterator per Seek so earlier seeks cannot warm later ones.
  for (int i = 0; i < FLAGS_total_keys; ++i) {
    std::string key = "k" + std::to_string(i);
    std::string value = "v" + std::to_string(i);

    std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
    get_perf_context()->Reset();
    iter->Seek(key);
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ(iter->value().ToString(), value);
    hist_seek.Add(get_perf_context()->user_key_comparison_count);
  }

  // Full forward scan; the perf context is reset before each Next() so each
  // sample covers exactly one step.
  std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
  for (iter->SeekToFirst(); iter->Valid();) {
    get_perf_context()->Reset();
    iter->Next();
    hist_next.Add(get_perf_context()->user_key_comparison_count);
  }
  ASSERT_OK(iter->status());
  if (FLAGS_verbose) {
    std::cout << "Seek:\n"
              << hist_seek.ToString() << "Next:\n"
              << hist_next.ToString();
  }
}
TEST_F(PerfContextTest, DBMutexLockCounter) {
  // db_mutex_lock_nanos must accumulate only when (a) the mutex is tagged
  // with DB_MUTEX_WAIT_MICROS and (b) the perf level times mutexes
  // (kEnableTime, not kEnableTimeExceptForMutex).
  int stats_code[] = {0, static_cast<int>(DB_MUTEX_WAIT_MICROS)};
  for (PerfLevel perf_level_test :
       {PerfLevel::kEnableTimeExceptForMutex, PerfLevel::kEnableTime}) {
    for (int c = 0; c < 2; ++c) {
      InstrumentedMutex mutex(nullptr, SystemClock::Default().get(),
                              stats_code[c]);
      mutex.Lock();  // hold the lock so the child thread must wait
      ROCKSDB_NAMESPACE::port::Thread child_thread([&] {
        SetPerfLevel(perf_level_test);  // perf level is thread-local
        get_perf_context()->Reset();
        ASSERT_EQ(get_perf_context()->db_mutex_lock_nanos, 0);
        mutex.Lock();
        mutex.Unlock();
        if (perf_level_test == PerfLevel::kEnableTimeExceptForMutex ||
            stats_code[c] != DB_MUTEX_WAIT_MICROS) {
          ASSERT_EQ(get_perf_context()->db_mutex_lock_nanos, 0);
        } else {
          // increment the counter only when it's a DB Mutex
          ASSERT_GT(get_perf_context()->db_mutex_lock_nanos, 0);
        }
      });
      // Give the child time to block on the mutex before releasing it.
      SystemClock::Default()->SleepForMicroseconds(100);
      mutex.Unlock();
      child_thread.join();
    }
  }
}
  541. TEST_F(PerfContextTest, FalseDBMutexWait) {
  542. SetPerfLevel(kEnableTime);
  543. int stats_code[] = {0, static_cast<int>(DB_MUTEX_WAIT_MICROS)};
  544. for (int c = 0; c < 2; ++c) {
  545. InstrumentedMutex mutex(nullptr, SystemClock::Default().get(),
  546. stats_code[c]);
  547. InstrumentedCondVar lock(&mutex);
  548. get_perf_context()->Reset();
  549. mutex.Lock();
  550. lock.TimedWait(100);
  551. mutex.Unlock();
  552. if (stats_code[c] == static_cast<int>(DB_MUTEX_WAIT_MICROS)) {
  553. // increment the counter only when it's a DB Mutex
  554. ASSERT_GT(get_perf_context()->db_condition_wait_nanos, 0);
  555. } else {
  556. ASSERT_EQ(get_perf_context()->db_condition_wait_nanos, 0);
  557. }
  558. }
  559. }
  560. TEST_F(PerfContextTest, ToString) {
  561. get_perf_context()->Reset();
  562. get_perf_context()->block_read_count = 12345;
  563. std::string zero_included = get_perf_context()->ToString();
  564. ASSERT_NE(std::string::npos, zero_included.find("= 0"));
  565. ASSERT_NE(std::string::npos, zero_included.find("= 12345"));
  566. std::string zero_excluded = get_perf_context()->ToString(true);
  567. ASSERT_EQ(std::string::npos, zero_excluded.find("= 0"));
  568. ASSERT_NE(std::string::npos, zero_excluded.find("= 12345"));
  569. }
  570. TEST_F(PerfContextTest, MergeOperatorTime) {
  571. ASSERT_OK(DestroyDB(kDbName, Options()));
  572. DB* db;
  573. Options options;
  574. options.create_if_missing = true;
  575. options.merge_operator = MergeOperators::CreateStringAppendOperator();
  576. Status s = DB::Open(options, kDbName, &db);
  577. EXPECT_OK(s);
  578. std::string val;
  579. ASSERT_OK(db->Merge(WriteOptions(), "k1", "val1"));
  580. ASSERT_OK(db->Merge(WriteOptions(), "k1", "val2"));
  581. ASSERT_OK(db->Merge(WriteOptions(), "k1", "val3"));
  582. ASSERT_OK(db->Merge(WriteOptions(), "k1", "val4"));
  583. SetPerfLevel(kEnableTime);
  584. get_perf_context()->Reset();
  585. ASSERT_OK(db->Get(ReadOptions(), "k1", &val));
  586. #ifdef OS_SOLARIS
  587. for (int i = 0; i < 100; i++) {
  588. ASSERT_OK(db->Get(ReadOptions(), "k1", &val));
  589. }
  590. #endif
  591. EXPECT_GT(get_perf_context()->merge_operator_time_nanos, 0);
  592. ASSERT_OK(db->Flush(FlushOptions()));
  593. get_perf_context()->Reset();
  594. ASSERT_OK(db->Get(ReadOptions(), "k1", &val));
  595. #ifdef OS_SOLARIS
  596. for (int i = 0; i < 100; i++) {
  597. ASSERT_OK(db->Get(ReadOptions(), "k1", &val));
  598. }
  599. #endif
  600. EXPECT_GT(get_perf_context()->merge_operator_time_nanos, 0);
  601. ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  602. get_perf_context()->Reset();
  603. ASSERT_OK(db->Get(ReadOptions(), "k1", &val));
  604. #ifdef OS_SOLARIS
  605. for (int i = 0; i < 100; i++) {
  606. ASSERT_OK(db->Get(ReadOptions(), "k1", &val));
  607. }
  608. #endif
  609. EXPECT_GT(get_perf_context()->merge_operator_time_nanos, 0);
  610. delete db;
  611. }
// Verifies that the per-level perf-context map survives copy-assignment,
// copy-construction, and move-construction: in each case, clearing/resetting
// the source must not disturb the destination's per-level counters.
TEST_F(PerfContextTest, CopyAndMove) {
  // Assignment operator
  {
    get_perf_context()->Reset();
    get_perf_context()->EnablePerLevelPerfContext();
    PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 5);
    ASSERT_EQ(
        1,
        (*(get_perf_context()->level_to_perf_context))[5].bloom_filter_useful);
    PerfContext perf_context_assign;
    perf_context_assign = *get_perf_context();
    ASSERT_EQ(
        1,
        (*(perf_context_assign.level_to_perf_context))[5].bloom_filter_useful);
    // Clearing the source must not affect the assigned-to copy (deep copy).
    get_perf_context()->ClearPerLevelPerfContext();
    get_perf_context()->Reset();
    ASSERT_EQ(
        1,
        (*(perf_context_assign.level_to_perf_context))[5].bloom_filter_useful);
    perf_context_assign.ClearPerLevelPerfContext();
    perf_context_assign.Reset();
  }
  // Copy constructor
  {
    get_perf_context()->Reset();
    get_perf_context()->EnablePerLevelPerfContext();
    PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 5);
    ASSERT_EQ(
        1,
        (*(get_perf_context()->level_to_perf_context))[5].bloom_filter_useful);
    PerfContext perf_context_copy(*get_perf_context());
    ASSERT_EQ(
        1, (*(perf_context_copy.level_to_perf_context))[5].bloom_filter_useful);
    // Clearing the source must not affect the copy.
    get_perf_context()->ClearPerLevelPerfContext();
    get_perf_context()->Reset();
    ASSERT_EQ(
        1, (*(perf_context_copy.level_to_perf_context))[5].bloom_filter_useful);
    perf_context_copy.ClearPerLevelPerfContext();
    perf_context_copy.Reset();
  }
  // Move constructor
  {
    get_perf_context()->Reset();
    get_perf_context()->EnablePerLevelPerfContext();
    PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 5);
    ASSERT_EQ(
        1,
        (*(get_perf_context()->level_to_perf_context))[5].bloom_filter_useful);
    PerfContext perf_context_move = std::move(*get_perf_context());
    ASSERT_EQ(
        1, (*(perf_context_move.level_to_perf_context))[5].bloom_filter_useful);
    // The moved-from thread-local context is cleared/reset; the moved-to
    // object must retain the per-level data.
    get_perf_context()->ClearPerLevelPerfContext();
    get_perf_context()->Reset();
    ASSERT_EQ(
        1, (*(perf_context_move.level_to_perf_context))[5].bloom_filter_useful);
    perf_context_move.ClearPerLevelPerfContext();
    perf_context_move.Reset();
  }
}
// Verifies that PERF_COUNTER_BY_LEVEL_ADD is a no-op while per-level
// tracking is disabled, and that counters recorded while enabled survive a
// copy of the context.
TEST_F(PerfContextTest, PerfContextDisableEnable) {
  get_perf_context()->Reset();
  get_perf_context()->EnablePerLevelPerfContext();
  PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_positive, 1, 0);  // recorded
  get_perf_context()->DisablePerLevelPerfContext();
  PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 5);  // dropped (disabled)
  get_perf_context()->EnablePerLevelPerfContext();
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_hit_count, 1, 0);  // recorded
  get_perf_context()->DisablePerLevelPerfContext();
  PerfContext perf_context_copy(*get_perf_context());
  ASSERT_EQ(1, (*(perf_context_copy.level_to_perf_context))[0]
                   .bloom_filter_full_positive);
  // this was set when per level perf context is disabled, should not be copied
  ASSERT_NE(
      1, (*(perf_context_copy.level_to_perf_context))[5].bloom_filter_useful);
  ASSERT_EQ(
      1, (*(perf_context_copy.level_to_perf_context))[0].block_cache_hit_count);
  perf_context_copy.ClearPerLevelPerfContext();
  perf_context_copy.Reset();
  get_perf_context()->ClearPerLevelPerfContext();
  get_perf_context()->Reset();
}
// Verifies per-level counter accumulation and the per-level section of
// PerfContext::ToString(): counters added via
// PERF_COUNTER_BY_LEVEL_ADD(metric, value, level) are keyed by level,
// repeated adds to the same level accumulate, and the string form renders
// each counter as "<value>@level<N>" entries in level order.
TEST_F(PerfContextTest, PerfContextByLevelGetSet) {
  get_perf_context()->Reset();
  get_perf_context()->EnablePerLevelPerfContext();
  PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_positive, 1, 0);
  PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 5);
  // Two adds to level 7 must accumulate to 2.
  PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 7);
  PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 7);
  PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_true_positive, 1, 2);
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_hit_count, 1, 0);
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_hit_count, 5, 2);
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_miss_count, 2, 3);
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_miss_count, 4, 1);
  // Level 0 never received bloom_filter_useful, so it reads as 0.
  ASSERT_EQ(
      0, (*(get_perf_context()->level_to_perf_context))[0].bloom_filter_useful);
  ASSERT_EQ(
      1, (*(get_perf_context()->level_to_perf_context))[5].bloom_filter_useful);
  ASSERT_EQ(
      2, (*(get_perf_context()->level_to_perf_context))[7].bloom_filter_useful);
  ASSERT_EQ(1, (*(get_perf_context()->level_to_perf_context))[0]
                   .bloom_filter_full_positive);
  ASSERT_EQ(1, (*(get_perf_context()->level_to_perf_context))[2]
                   .bloom_filter_full_true_positive);
  ASSERT_EQ(
      1,
      (*(get_perf_context()->level_to_perf_context))[0].block_cache_hit_count);
  ASSERT_EQ(
      5,
      (*(get_perf_context()->level_to_perf_context))[2].block_cache_hit_count);
  ASSERT_EQ(
      2,
      (*(get_perf_context()->level_to_perf_context))[3].block_cache_miss_count);
  ASSERT_EQ(
      4,
      (*(get_perf_context()->level_to_perf_context))[1].block_cache_miss_count);
  // ToString(true) excludes zero-valued counters; per-level entries are
  // expected in ascending level order (note miss counts were added for level
  // 3 before level 1 but print as "4@level1, 2@level3").
  std::string zero_excluded = get_perf_context()->ToString(true);
  ASSERT_NE(std::string::npos,
            zero_excluded.find("bloom_filter_useful = 1@level5, 2@level7"));
  ASSERT_NE(std::string::npos,
            zero_excluded.find("bloom_filter_full_positive = 1@level0"));
  ASSERT_NE(std::string::npos,
            zero_excluded.find("bloom_filter_full_true_positive = 1@level2"));
  ASSERT_NE(std::string::npos,
            zero_excluded.find("block_cache_hit_count = 1@level0, 5@level2"));
  ASSERT_NE(std::string::npos,
            zero_excluded.find("block_cache_miss_count = 4@level1, 2@level3"));
}
// Exercises the per-operation CPU-time counters (get_cpu_nanos,
// iter_seek_cpu_nanos, iter_next_cpu_nanos, iter_prev_cpu_nanos) for Get and
// each iterator positioning operation, and checks that the seek timer only
// ever increases within a Reset() window. Skipped on targets where
// SystemClock::CPUNanos() is unsupported (returns 0).
TEST_F(PerfContextTest, CPUTimer) {
  if (SystemClock::Default()->CPUNanos() == 0) {
    ROCKSDB_GTEST_SKIP("Target without CPUNanos support");
    return;
  }

  ASSERT_OK(DestroyDB(kDbName, Options()));
  auto db = OpenDb();
  WriteOptions write_options;
  ReadOptions read_options;
  // CPU-time counters require this perf level (or higher) to be populated.
  SetPerfLevel(PerfLevel::kEnableTimeAndCPUTimeExceptForMutex);

  // Load keys k0..k<total_keys-1>. Track the lexicographically largest
  // numeric suffix so "last" below means last in iteration (byte) order,
  // not insertion order (e.g. "k9" sorts after "k10").
  std::string max_str = "0";
  for (int i = 0; i < FLAGS_total_keys; ++i) {
    std::string i_str = std::to_string(i);
    std::string key = "k" + i_str;
    std::string value = "v" + i_str;
    max_str = max_str > i_str ? max_str : i_str;
    ASSERT_OK(db->Put(write_options, key, value));
  }
  std::string last_key = "k" + max_str;
  std::string last_value = "v" + max_str;

  {
    // Get
    get_perf_context()->Reset();
    std::string value;
    ASSERT_OK(db->Get(read_options, "k0", &value));
    ASSERT_EQ(value, "v0");

    if (FLAGS_verbose) {
      std::cout << "Get CPU time nanos: " << get_perf_context()->get_cpu_nanos
                << "ns\n";
    }

    // Iter
    std::unique_ptr<Iterator> iter(db->NewIterator(read_options));

    // Seek
    get_perf_context()->Reset();
    iter->Seek(last_key);
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ(last_value, iter->value().ToString());

    if (FLAGS_verbose) {
      std::cout << "Iter Seek CPU time nanos: "
                << get_perf_context()->iter_seek_cpu_nanos << "ns\n";
    }

    // SeekForPrev
    get_perf_context()->Reset();
    iter->SeekForPrev(last_key);
    ASSERT_TRUE(iter->Valid());

    if (FLAGS_verbose) {
      std::cout << "Iter SeekForPrev CPU time nanos: "
                << get_perf_context()->iter_seek_cpu_nanos << "ns\n";
    }

    // SeekToLast
    get_perf_context()->Reset();
    iter->SeekToLast();
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ(last_value, iter->value().ToString());

    if (FLAGS_verbose) {
      std::cout << "Iter SeekToLast CPU time nanos: "
                << get_perf_context()->iter_seek_cpu_nanos << "ns\n";
    }

    // SeekToFirst
    get_perf_context()->Reset();
    iter->SeekToFirst();
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("v0", iter->value().ToString());

    if (FLAGS_verbose) {
      std::cout << "Iter SeekToFirst CPU time nanos: "
                << get_perf_context()->iter_seek_cpu_nanos << "ns\n";
    }

    // Next
    get_perf_context()->Reset();
    iter->Next();
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("v1", iter->value().ToString());

    if (FLAGS_verbose) {
      std::cout << "Iter Next CPU time nanos: "
                << get_perf_context()->iter_next_cpu_nanos << "ns\n";
    }

    // Prev
    get_perf_context()->Reset();
    iter->Prev();
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("v0", iter->value().ToString());

    if (FLAGS_verbose) {
      std::cout << "Iter Prev CPU time nanos: "
                << get_perf_context()->iter_prev_cpu_nanos << "ns\n";
    }

    // monotonically increasing
    // Without an intervening Reset(), the accumulated seek CPU time must
    // never decrease across successive Seek calls, and must strictly grow
    // over the whole sequence.
    get_perf_context()->Reset();
    uint64_t count = get_perf_context()->iter_seek_cpu_nanos;
    uint64_t before_count = count;
    for (int i = 0; i < FLAGS_total_keys; ++i) {
      iter->Seek("k" + std::to_string(i));
      ASSERT_TRUE(iter->Valid());
      ASSERT_EQ("v" + std::to_string(i), iter->value().ToString());
      auto next_count = get_perf_context()->iter_seek_cpu_nanos;
      ASSERT_GE(next_count, count);
      count = next_count;
    }
    ASSERT_GT(count, before_count);

    // iterator creation/destruction; multiple iterators
    // Creating a second iterator must not advance the seek timer by itself;
    // only its Seek does. Destroying it must not change the counter either.
    {
      std::unique_ptr<Iterator> iter2(db->NewIterator(read_options));
      ASSERT_EQ(count, get_perf_context()->iter_seek_cpu_nanos);
      iter2->Seek(last_key);
      ASSERT_TRUE(iter2->Valid());
      ASSERT_EQ(last_value, iter2->value().ToString());
      ASSERT_GT(get_perf_context()->iter_seek_cpu_nanos, count);
      count = get_perf_context()->iter_seek_cpu_nanos;
    }
    ASSERT_EQ(count, get_perf_context()->iter_seek_cpu_nanos);
  }
}
// Verifies that the merge-operand perf counters reflect the number of merge
// operands applied per key: internal_merge_point_lookup_count for point
// lookups (Get/GetEntity/MultiGet/MultiGetEntity) and internal_merge_count
// for iteration — checked both from the memtable and, after a flush, from
// table files.
TEST_F(PerfContextTest, MergeOperandCount) {
  ASSERT_OK(DestroyDB(kDbName, Options()));

  DB* db = nullptr;
  Options options;
  options.create_if_missing = true;
  options.merge_operator = MergeOperators::CreateStringAppendOperator();

  ASSERT_OK(DB::Open(options, kDbName, &db));
  std::unique_ptr<DB> db_guard(db);

  constexpr size_t num_keys = 3;
  const std::string key_prefix("key");
  const std::string value_prefix("value");

  std::vector<std::string> keys;
  keys.reserve(num_keys);

  for (size_t i = 0; i < num_keys; ++i) {
    keys.emplace_back(key_prefix + std::to_string(i));
  }

  // Write three keys with one Put each followed by 1, 2, and 3
  // Merge operations respectively.
  constexpr size_t total_merges = num_keys * (num_keys + 1) / 2;

  std::vector<ManagedSnapshot> snapshots;
  snapshots.reserve(total_merges);

  for (size_t i = 0; i < num_keys; ++i) {
    const std::string suffix = std::to_string(i);
    const std::string value = value_prefix + suffix;

    ASSERT_OK(db->Put(WriteOptions(), keys[i], value));

    for (size_t j = 0; j <= i; ++j) {
      // Take a snapshot before each Merge so they are preserved and not
      // collapsed during flush.
      snapshots.emplace_back(db);
      ASSERT_OK(db->Merge(WriteOptions(), keys[i], value + std::to_string(j)));
    }
  }

  // Shared verification body; run once against the memtable and once after
  // flushing to table files.
  auto verify = [&]() {
    get_perf_context()->Reset();

    for (size_t i = 0; i < num_keys; ++i) {
      // Get: key i carries i + 1 merge operands.
      {
        PinnableSlice result;
        ASSERT_OK(db->Get(ReadOptions(), db->DefaultColumnFamily(), keys[i],
                          &result));
        ASSERT_EQ(get_perf_context()->internal_merge_point_lookup_count, i + 1);

        get_perf_context()->Reset();
      }

      // GetEntity
      {
        PinnableWideColumns result;
        ASSERT_OK(db->GetEntity(ReadOptions(), db->DefaultColumnFamily(),
                                keys[i], &result));
        ASSERT_EQ(get_perf_context()->internal_merge_point_lookup_count, i + 1);

        get_perf_context()->Reset();
      }
    }

    {
      std::vector<Slice> key_slices;
      key_slices.reserve(num_keys);

      for (size_t i = 0; i < num_keys; ++i) {
        key_slices.emplace_back(keys[i]);
      }

      // MultiGet: all keys in one batch, so the counter accumulates to the
      // total number of merge operands (1 + 2 + 3).
      {
        std::vector<PinnableSlice> results(num_keys);
        std::vector<Status> statuses(num_keys);

        db->MultiGet(ReadOptions(), db->DefaultColumnFamily(), num_keys,
                     key_slices.data(), results.data(), statuses.data());

        for (size_t i = 0; i < num_keys; ++i) {
          ASSERT_OK(statuses[i]);
        }

        ASSERT_EQ(get_perf_context()->internal_merge_point_lookup_count,
                  total_merges);

        get_perf_context()->Reset();
      }

      // MultiGetEntity
      {
        std::vector<PinnableWideColumns> results(num_keys);
        std::vector<Status> statuses(num_keys);

        db->MultiGetEntity(ReadOptions(), db->DefaultColumnFamily(), num_keys,
                           key_slices.data(), results.data(), statuses.data());

        for (size_t i = 0; i < num_keys; ++i) {
          ASSERT_OK(statuses[i]);
        }

        ASSERT_EQ(get_perf_context()->internal_merge_point_lookup_count,
                  total_merges);

        get_perf_context()->Reset();
      }
    }

    std::unique_ptr<Iterator> it(db->NewIterator(ReadOptions()));

    // Forward iteration: iteration uses internal_merge_count, reset per key.
    {
      size_t i = 0;

      for (it->SeekToFirst(); it->Valid(); it->Next(), ++i) {
        ASSERT_EQ(it->key(), keys[i]);
        ASSERT_EQ(get_perf_context()->internal_merge_count, i + 1);

        get_perf_context()->Reset();
      }

      ASSERT_OK(it->status());
    }

    // Backward iteration
    // Note: i wraps around (unsigned) after the last key, but the loop exits
    // on !Valid() before the wrapped index is used.
    {
      size_t i = num_keys - 1;

      for (it->SeekToLast(); it->Valid(); it->Prev(), --i) {
        ASSERT_EQ(it->key(), keys[i]);
        ASSERT_EQ(get_perf_context()->internal_merge_count, i + 1);

        get_perf_context()->Reset();
      }

      ASSERT_OK(it->status());
    }
  };

  // Verify counters when reading from memtable
  verify();

  // Verify counters when reading from table files
  ASSERT_OK(db->Flush(FlushOptions()));
  verify();
}
  963. TEST_F(PerfContextTest, WriteMemtableTimePerfLevel) {
  964. // Write and check time
  965. ASSERT_OK(DestroyDB(kDbName, Options()));
  966. std::shared_ptr<DB> db = OpenDb();
  967. SetPerfLevel(PerfLevel::kEnableWait);
  968. PerfContext* perf_ctx = get_perf_context();
  969. perf_ctx->Reset();
  970. ASSERT_OK(db->Put(WriteOptions(), "foo1", "bar"));
  971. ASSERT_GT(perf_context.write_memtable_time, 0);
  972. SetPerfLevel(PerfLevel::kEnableCount);
  973. perf_ctx->Reset();
  974. ASSERT_OK(db->Put(WriteOptions(), "foo0", "bar"));
  975. ASSERT_EQ(perf_context.write_memtable_time, 0);
  976. }
  977. } // namespace ROCKSDB_NAMESPACE
  978. int main(int argc, char** argv) {
  979. ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  980. ::testing::InitGoogleTest(&argc, argv);
  981. for (int i = 1; i < argc; i++) {
  982. int n;
  983. char junk;
  984. if (sscanf(argv[i], "--write_buffer_size=%d%c", &n, &junk) == 1) {
  985. FLAGS_write_buffer_size = n;
  986. }
  987. if (sscanf(argv[i], "--total_keys=%d%c", &n, &junk) == 1) {
  988. FLAGS_total_keys = n;
  989. }
  990. if (sscanf(argv[i], "--random_key=%d%c", &n, &junk) == 1 &&
  991. (n == 0 || n == 1)) {
  992. FLAGS_random_key = n;
  993. }
  994. if (sscanf(argv[i], "--use_set_based_memetable=%d%c", &n, &junk) == 1 &&
  995. (n == 0 || n == 1)) {
  996. FLAGS_use_set_based_memetable = n;
  997. }
  998. if (sscanf(argv[i], "--verbose=%d%c", &n, &junk) == 1 &&
  999. (n == 0 || n == 1)) {
  1000. FLAGS_verbose = n;
  1001. }
  1002. }
  1003. if (FLAGS_verbose) {
  1004. std::cout << kDbName << "\n";
  1005. }
  1006. return RUN_ALL_TESTS();
  1007. }