perf_context_test.cc 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981
  1. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
  2. // This source code is licensed under both the GPLv2 (found in the
  3. // COPYING file in the root directory) and Apache 2.0 License
  4. // (found in the LICENSE.Apache file in the root directory).
  5. //
  6. #include <algorithm>
  7. #include <iostream>
  8. #include <thread>
  9. #include <vector>
  10. #include "monitoring/histogram.h"
  11. #include "monitoring/instrumented_mutex.h"
  12. #include "monitoring/perf_context_imp.h"
  13. #include "monitoring/thread_status_util.h"
  14. #include "port/port.h"
  15. #include "rocksdb/db.h"
  16. #include "rocksdb/memtablerep.h"
  17. #include "rocksdb/perf_context.h"
  18. #include "rocksdb/slice_transform.h"
  19. #include "test_util/testharness.h"
  20. #include "util/stop_watch.h"
  21. #include "util/string_util.h"
  22. #include "utilities/merge_operators.h"
// Test knobs. Defaults are used unless a test driver overrides them.
bool FLAGS_random_key = false;               // shuffle keys before insertion
// [sic] "memetable" typo is in the identifier itself; kept for compatibility.
bool FLAGS_use_set_based_memetable = false;  // use HashSkipList memtable rep
int FLAGS_total_keys = 100;                  // number of key/value pairs
int FLAGS_write_buffer_size = 1000000000;    // memtable size, in bytes
int FLAGS_max_write_buffer_number = 8;
int FLAGS_min_write_buffer_number_to_merge = 7;
bool FLAGS_verbose = false;                  // print histograms to stdout

// Path to the database on file system
const std::string kDbName =
    ROCKSDB_NAMESPACE::test::PerThreadDBPath("perf_context_test");
  33. namespace ROCKSDB_NAMESPACE {
  34. std::shared_ptr<DB> OpenDb(bool read_only = false) {
  35. DB* db;
  36. Options options;
  37. options.create_if_missing = true;
  38. options.max_open_files = -1;
  39. options.write_buffer_size = FLAGS_write_buffer_size;
  40. options.max_write_buffer_number = FLAGS_max_write_buffer_number;
  41. options.min_write_buffer_number_to_merge =
  42. FLAGS_min_write_buffer_number_to_merge;
  43. if (FLAGS_use_set_based_memetable) {
  44. #ifndef ROCKSDB_LITE
  45. options.prefix_extractor.reset(
  46. ROCKSDB_NAMESPACE::NewFixedPrefixTransform(0));
  47. options.memtable_factory.reset(NewHashSkipListRepFactory());
  48. #endif // ROCKSDB_LITE
  49. }
  50. Status s;
  51. if (!read_only) {
  52. s = DB::Open(options, kDbName, &db);
  53. } else {
  54. s = DB::OpenForReadOnly(options, kDbName, &db);
  55. }
  56. EXPECT_OK(s);
  57. return std::shared_ptr<DB>(db);
  58. }
// Test fixture. Carries no shared state; each test opens/destroys its own DB.
class PerfContextTest : public testing::Test {};
  60. TEST_F(PerfContextTest, SeekIntoDeletion) {
  61. DestroyDB(kDbName, Options());
  62. auto db = OpenDb();
  63. WriteOptions write_options;
  64. ReadOptions read_options;
  65. for (int i = 0; i < FLAGS_total_keys; ++i) {
  66. std::string key = "k" + ToString(i);
  67. std::string value = "v" + ToString(i);
  68. db->Put(write_options, key, value);
  69. }
  70. for (int i = 0; i < FLAGS_total_keys -1 ; ++i) {
  71. std::string key = "k" + ToString(i);
  72. db->Delete(write_options, key);
  73. }
  74. HistogramImpl hist_get;
  75. HistogramImpl hist_get_time;
  76. for (int i = 0; i < FLAGS_total_keys - 1; ++i) {
  77. std::string key = "k" + ToString(i);
  78. std::string value;
  79. get_perf_context()->Reset();
  80. StopWatchNano timer(Env::Default());
  81. timer.Start();
  82. auto status = db->Get(read_options, key, &value);
  83. auto elapsed_nanos = timer.ElapsedNanos();
  84. ASSERT_TRUE(status.IsNotFound());
  85. hist_get.Add(get_perf_context()->user_key_comparison_count);
  86. hist_get_time.Add(elapsed_nanos);
  87. }
  88. if (FLAGS_verbose) {
  89. std::cout << "Get user key comparison: \n" << hist_get.ToString()
  90. << "Get time: \n" << hist_get_time.ToString();
  91. }
  92. {
  93. HistogramImpl hist_seek_to_first;
  94. std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
  95. get_perf_context()->Reset();
  96. StopWatchNano timer(Env::Default(), true);
  97. iter->SeekToFirst();
  98. hist_seek_to_first.Add(get_perf_context()->user_key_comparison_count);
  99. auto elapsed_nanos = timer.ElapsedNanos();
  100. if (FLAGS_verbose) {
  101. std::cout << "SeekToFirst uesr key comparison: \n"
  102. << hist_seek_to_first.ToString()
  103. << "ikey skipped: " << get_perf_context()->internal_key_skipped_count
  104. << "\n"
  105. << "idelete skipped: "
  106. << get_perf_context()->internal_delete_skipped_count << "\n"
  107. << "elapsed: " << elapsed_nanos << "\n";
  108. }
  109. }
  110. HistogramImpl hist_seek;
  111. for (int i = 0; i < FLAGS_total_keys; ++i) {
  112. std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
  113. std::string key = "k" + ToString(i);
  114. get_perf_context()->Reset();
  115. StopWatchNano timer(Env::Default(), true);
  116. iter->Seek(key);
  117. auto elapsed_nanos = timer.ElapsedNanos();
  118. hist_seek.Add(get_perf_context()->user_key_comparison_count);
  119. if (FLAGS_verbose) {
  120. std::cout << "seek cmp: " << get_perf_context()->user_key_comparison_count
  121. << " ikey skipped " << get_perf_context()->internal_key_skipped_count
  122. << " idelete skipped "
  123. << get_perf_context()->internal_delete_skipped_count
  124. << " elapsed: " << elapsed_nanos << "ns\n";
  125. }
  126. get_perf_context()->Reset();
  127. ASSERT_TRUE(iter->Valid());
  128. StopWatchNano timer2(Env::Default(), true);
  129. iter->Next();
  130. auto elapsed_nanos2 = timer2.ElapsedNanos();
  131. if (FLAGS_verbose) {
  132. std::cout << "next cmp: " << get_perf_context()->user_key_comparison_count
  133. << "elapsed: " << elapsed_nanos2 << "ns\n";
  134. }
  135. }
  136. if (FLAGS_verbose) {
  137. std::cout << "Seek uesr key comparison: \n" << hist_seek.ToString();
  138. }
  139. }
  140. TEST_F(PerfContextTest, StopWatchNanoOverhead) {
  141. // profile the timer cost by itself!
  142. const int kTotalIterations = 1000000;
  143. std::vector<uint64_t> timings(kTotalIterations);
  144. StopWatchNano timer(Env::Default(), true);
  145. for (auto& timing : timings) {
  146. timing = timer.ElapsedNanos(true /* reset */);
  147. }
  148. HistogramImpl histogram;
  149. for (const auto timing : timings) {
  150. histogram.Add(timing);
  151. }
  152. if (FLAGS_verbose) {
  153. std::cout << histogram.ToString();
  154. }
  155. }
  156. TEST_F(PerfContextTest, StopWatchOverhead) {
  157. // profile the timer cost by itself!
  158. const int kTotalIterations = 1000000;
  159. uint64_t elapsed = 0;
  160. std::vector<uint64_t> timings(kTotalIterations);
  161. StopWatch timer(Env::Default(), nullptr, 0, &elapsed);
  162. for (auto& timing : timings) {
  163. timing = elapsed;
  164. }
  165. HistogramImpl histogram;
  166. uint64_t prev_timing = 0;
  167. for (const auto timing : timings) {
  168. histogram.Add(timing - prev_timing);
  169. prev_timing = timing;
  170. }
  171. if (FLAGS_verbose) {
  172. std::cout << histogram.ToString();
  173. }
  174. }
// Runs a Put workload (with one mid-stream flush), then Get/MultiGet over
// every key — first on the read-write DB, then again after reopening it
// read-only — collecting perf_context counters/timers into histograms.
// @param enabled_time  true when the caller set a timing perf level, in
//        which case the timing histograms are asserted to be non-zero.
void ProfileQueries(bool enabled_time = false) {
  DestroyDB(kDbName, Options());  // Start this test with a fresh DB

  auto db = OpenDb();

  WriteOptions write_options;
  ReadOptions read_options;

  // Histograms for Get().
  HistogramImpl hist_put;
  HistogramImpl hist_get;
  HistogramImpl hist_get_snapshot;
  HistogramImpl hist_get_memtable;
  HistogramImpl hist_get_files;
  HistogramImpl hist_get_post_process;
  HistogramImpl hist_num_memtable_checked;

  // Histograms for MultiGet().
  HistogramImpl hist_mget;
  HistogramImpl hist_mget_snapshot;
  HistogramImpl hist_mget_memtable;
  HistogramImpl hist_mget_files;
  HistogramImpl hist_mget_post_process;
  HistogramImpl hist_mget_num_memtable_checked;

  // Histograms for Put().
  HistogramImpl hist_write_pre_post;
  HistogramImpl hist_write_wal_time;
  HistogramImpl hist_write_memtable_time;
  HistogramImpl hist_write_delay_time;
  HistogramImpl hist_write_thread_wait_nanos;
  HistogramImpl hist_write_scheduling_time;
  uint64_t total_db_mutex_nanos = 0;

  if (FLAGS_verbose) {
    std::cout << "Inserting " << FLAGS_total_keys << " key/value pairs\n...\n";
  }

  // Build the key schedule; kFlushFlag is a sentinel that triggers a Flush
  // instead of a Put when encountered.
  std::vector<int> keys;
  const int kFlushFlag = -1;
  for (int i = 0; i < FLAGS_total_keys; ++i) {
    keys.push_back(i);
    if (i == FLAGS_total_keys / 2) {
      // Issuing a flush in the middle.
      keys.push_back(kFlushFlag);
    }
  }

  if (FLAGS_random_key) {
    std::random_shuffle(keys.begin(), keys.end());
  }
#ifndef NDEBUG
  // Inject an artificial delay into mutex waits so db_mutex_lock_nanos has
  // something measurable to accumulate.
  ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, 1U);
#endif
  int num_mutex_waited = 0;
  for (const int i : keys) {
    if (i == kFlushFlag) {
      FlushOptions fo;
      db->Flush(fo);  // NOTE(review): status ignored — consider ASSERT_OK
      continue;
    }

    std::string key = "k" + ToString(i);
    std::string value = "v" + ToString(i);

    std::vector<std::string> values;

    get_perf_context()->Reset();
    db->Put(write_options, key, value);  // NOTE(review): status ignored
    if (++num_mutex_waited > 3) {
      // Stop inflating mutex waits after the first few writes so the test
      // does not become too slow.
#ifndef NDEBUG
      ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, 0U);
#endif
    }
    hist_write_pre_post.Add(
        get_perf_context()->write_pre_and_post_process_time);
    hist_write_wal_time.Add(get_perf_context()->write_wal_time);
    hist_write_memtable_time.Add(get_perf_context()->write_memtable_time);
    hist_write_delay_time.Add(get_perf_context()->write_delay_time);
    hist_write_thread_wait_nanos.Add(
        get_perf_context()->write_thread_wait_nanos);
    hist_write_scheduling_time.Add(
        get_perf_context()->write_scheduling_flushes_compactions_time);
    hist_put.Add(get_perf_context()->user_key_comparison_count);
    total_db_mutex_nanos += get_perf_context()->db_mutex_lock_nanos;
  }
#ifndef NDEBUG
  ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, 0U);
#endif

  // Read phase: Get then MultiGet for every key, on the read-write DB.
  for (const int i : keys) {
    if (i == kFlushFlag) {
      continue;
    }

    std::string key = "k" + ToString(i);
    std::string expected_value = "v" + ToString(i);
    std::string value;

    std::vector<Slice> multiget_keys = {Slice(key)};
    std::vector<std::string> values;

    get_perf_context()->Reset();
    ASSERT_OK(db->Get(read_options, key, &value));
    ASSERT_EQ(expected_value, value);
    hist_get_snapshot.Add(get_perf_context()->get_snapshot_time);
    hist_get_memtable.Add(get_perf_context()->get_from_memtable_time);
    hist_get_files.Add(get_perf_context()->get_from_output_files_time);
    hist_num_memtable_checked.Add(get_perf_context()->get_from_memtable_count);
    hist_get_post_process.Add(get_perf_context()->get_post_process_time);
    hist_get.Add(get_perf_context()->user_key_comparison_count);

    get_perf_context()->Reset();
    db->MultiGet(read_options, multiget_keys, &values);
    hist_mget_snapshot.Add(get_perf_context()->get_snapshot_time);
    hist_mget_memtable.Add(get_perf_context()->get_from_memtable_time);
    hist_mget_files.Add(get_perf_context()->get_from_output_files_time);
    hist_mget_num_memtable_checked.Add(get_perf_context()->get_from_memtable_count);
    hist_mget_post_process.Add(get_perf_context()->get_post_process_time);
    hist_mget.Add(get_perf_context()->user_key_comparison_count);
  }

  if (FLAGS_verbose) {
    std::cout << "Put uesr key comparison: \n" << hist_put.ToString()
              << "Get uesr key comparison: \n" << hist_get.ToString()
              << "MultiGet uesr key comparison: \n" << hist_get.ToString();
    std::cout << "Put(): Pre and Post Process Time: \n"
              << hist_write_pre_post.ToString() << " Writing WAL time: \n"
              << hist_write_wal_time.ToString() << "\n"
              << " Writing Mem Table time: \n"
              << hist_write_memtable_time.ToString() << "\n"
              << " Write Delay: \n" << hist_write_delay_time.ToString() << "\n"
              << " Waiting for Batch time: \n"
              << hist_write_thread_wait_nanos.ToString() << "\n"
              << " Scheduling Flushes and Compactions Time: \n"
              << hist_write_scheduling_time.ToString() << "\n"
              << " Total DB mutex nanos: \n" << total_db_mutex_nanos << "\n";

    std::cout << "Get(): Time to get snapshot: \n"
              << hist_get_snapshot.ToString()
              << " Time to get value from memtables: \n"
              << hist_get_memtable.ToString() << "\n"
              << " Time to get value from output files: \n"
              << hist_get_files.ToString() << "\n"
              << " Number of memtables checked: \n"
              << hist_num_memtable_checked.ToString() << "\n"
              << " Time to post process: \n" << hist_get_post_process.ToString()
              << "\n";

    std::cout << "MultiGet(): Time to get snapshot: \n"
              << hist_mget_snapshot.ToString()
              << " Time to get value from memtables: \n"
              << hist_mget_memtable.ToString() << "\n"
              << " Time to get value from output files: \n"
              << hist_mget_files.ToString() << "\n"
              << " Number of memtables checked: \n"
              << hist_mget_num_memtable_checked.ToString() << "\n"
              << " Time to post process: \n"
              << hist_mget_post_process.ToString() << "\n";
  }

  if (enabled_time) {
    ASSERT_GT(hist_get.Average(), 0);
    ASSERT_GT(hist_get_snapshot.Average(), 0);
    ASSERT_GT(hist_get_memtable.Average(), 0);
    ASSERT_GT(hist_get_files.Average(), 0);
    ASSERT_GT(hist_get_post_process.Average(), 0);
    ASSERT_GT(hist_num_memtable_checked.Average(), 0);

    ASSERT_GT(hist_mget.Average(), 0);
    ASSERT_GT(hist_mget_snapshot.Average(), 0);
    ASSERT_GT(hist_mget_memtable.Average(), 0);
    ASSERT_GT(hist_mget_files.Average(), 0);
    ASSERT_GT(hist_mget_post_process.Average(), 0);
    ASSERT_GT(hist_mget_num_memtable_checked.Average(), 0);

    EXPECT_GT(hist_write_pre_post.Average(), 0);
    EXPECT_GT(hist_write_wal_time.Average(), 0);
    EXPECT_GT(hist_write_memtable_time.Average(), 0);
    // No stalls or batched waits are expected in this small workload.
    EXPECT_EQ(hist_write_delay_time.Average(), 0);
    EXPECT_EQ(hist_write_thread_wait_nanos.Average(), 0);
    EXPECT_GT(hist_write_scheduling_time.Average(), 0);

#ifndef NDEBUG
    // The injected STATE_MUTEX_WAIT delay above guarantees measurable time.
    ASSERT_GT(total_db_mutex_nanos, 2000U);
#endif
  }

  // Second pass: reopen read-only and repeat the read phase with fresh
  // histograms.
  db.reset();
  db = OpenDb(true);

  hist_get.Clear();
  hist_get_snapshot.Clear();
  hist_get_memtable.Clear();
  hist_get_files.Clear();
  hist_get_post_process.Clear();
  hist_num_memtable_checked.Clear();

  hist_mget.Clear();
  hist_mget_snapshot.Clear();
  hist_mget_memtable.Clear();
  hist_mget_files.Clear();
  hist_mget_post_process.Clear();
  hist_mget_num_memtable_checked.Clear();

  for (const int i : keys) {
    if (i == kFlushFlag) {
      continue;
    }

    std::string key = "k" + ToString(i);
    std::string expected_value = "v" + ToString(i);
    std::string value;

    std::vector<Slice> multiget_keys = {Slice(key)};
    std::vector<std::string> values;

    get_perf_context()->Reset();
    ASSERT_OK(db->Get(read_options, key, &value));
    ASSERT_EQ(expected_value, value);
    hist_get_snapshot.Add(get_perf_context()->get_snapshot_time);
    hist_get_memtable.Add(get_perf_context()->get_from_memtable_time);
    hist_get_files.Add(get_perf_context()->get_from_output_files_time);
    hist_num_memtable_checked.Add(get_perf_context()->get_from_memtable_count);
    hist_get_post_process.Add(get_perf_context()->get_post_process_time);
    hist_get.Add(get_perf_context()->user_key_comparison_count);

    get_perf_context()->Reset();
    db->MultiGet(read_options, multiget_keys, &values);
    hist_mget_snapshot.Add(get_perf_context()->get_snapshot_time);
    hist_mget_memtable.Add(get_perf_context()->get_from_memtable_time);
    hist_mget_files.Add(get_perf_context()->get_from_output_files_time);
    hist_mget_num_memtable_checked.Add(get_perf_context()->get_from_memtable_count);
    hist_mget_post_process.Add(get_perf_context()->get_post_process_time);
    hist_mget.Add(get_perf_context()->user_key_comparison_count);
  }

  if (FLAGS_verbose) {
    std::cout << "ReadOnly Get uesr key comparison: \n" << hist_get.ToString()
              << "ReadOnly MultiGet uesr key comparison: \n"
              << hist_mget.ToString();

    std::cout << "ReadOnly Get(): Time to get snapshot: \n"
              << hist_get_snapshot.ToString()
              << " Time to get value from memtables: \n"
              << hist_get_memtable.ToString() << "\n"
              << " Time to get value from output files: \n"
              << hist_get_files.ToString() << "\n"
              << " Number of memtables checked: \n"
              << hist_num_memtable_checked.ToString() << "\n"
              << " Time to post process: \n" << hist_get_post_process.ToString()
              << "\n";

    std::cout << "ReadOnly MultiGet(): Time to get snapshot: \n"
              << hist_mget_snapshot.ToString()
              << " Time to get value from memtables: \n"
              << hist_mget_memtable.ToString() << "\n"
              << " Time to get value from output files: \n"
              << hist_mget_files.ToString() << "\n"
              << " Number of memtables checked: \n"
              << hist_mget_num_memtable_checked.ToString() << "\n"
              << " Time to post process: \n"
              << hist_mget_post_process.ToString() << "\n";
  }

  if (enabled_time) {
    ASSERT_GT(hist_get.Average(), 0);
    ASSERT_GT(hist_get_memtable.Average(), 0);
    ASSERT_GT(hist_get_files.Average(), 0);
    ASSERT_GT(hist_num_memtable_checked.Average(), 0);
    // In read-only mode Get(), no super version operation is needed
    ASSERT_EQ(hist_get_post_process.Average(), 0);
    ASSERT_GT(hist_get_snapshot.Average(), 0);

    ASSERT_GT(hist_mget.Average(), 0);
    ASSERT_GT(hist_mget_snapshot.Average(), 0);
    ASSERT_GT(hist_mget_memtable.Average(), 0);
    ASSERT_GT(hist_mget_files.Average(), 0);
    ASSERT_GT(hist_mget_post_process.Average(), 0);
    ASSERT_GT(hist_mget_num_memtable_checked.Average(), 0);
  }
}
#ifndef ROCKSDB_LITE
// Drives ProfileQueries under three perf levels: counters only, fully
// disabled, and with timers on (the latter asserts non-zero timing
// histograms inside ProfileQueries).
TEST_F(PerfContextTest, KeyComparisonCount) {
  SetPerfLevel(kEnableCount);
  ProfileQueries();

  SetPerfLevel(kDisable);
  ProfileQueries();

  SetPerfLevel(kEnableTime);
  ProfileQueries(true);
}
#endif  // ROCKSDB_LITE
  428. // make perf_context_test
  429. // export ROCKSDB_TESTS=PerfContextTest.SeekKeyComparison
  430. // For one memtable:
  431. // ./perf_context_test --write_buffer_size=500000 --total_keys=10000
  432. // For two memtables:
  433. // ./perf_context_test --write_buffer_size=250000 --total_keys=10000
  434. // Specify --random_key=1 to shuffle the key before insertion
  435. // Results show that, for sequential insertion, worst-case Seek Key comparison
  436. // is close to the total number of keys (linear), when there is only one
  437. // memtable. When there are two memtables, even the avg Seek Key comparison
  438. // starts to become linear to the input size.
  439. TEST_F(PerfContextTest, SeekKeyComparison) {
  440. DestroyDB(kDbName, Options());
  441. auto db = OpenDb();
  442. WriteOptions write_options;
  443. ReadOptions read_options;
  444. if (FLAGS_verbose) {
  445. std::cout << "Inserting " << FLAGS_total_keys << " key/value pairs\n...\n";
  446. }
  447. std::vector<int> keys;
  448. for (int i = 0; i < FLAGS_total_keys; ++i) {
  449. keys.push_back(i);
  450. }
  451. if (FLAGS_random_key) {
  452. std::random_shuffle(keys.begin(), keys.end());
  453. }
  454. HistogramImpl hist_put_time;
  455. HistogramImpl hist_wal_time;
  456. HistogramImpl hist_time_diff;
  457. SetPerfLevel(kEnableTime);
  458. StopWatchNano timer(Env::Default());
  459. for (const int i : keys) {
  460. std::string key = "k" + ToString(i);
  461. std::string value = "v" + ToString(i);
  462. get_perf_context()->Reset();
  463. timer.Start();
  464. db->Put(write_options, key, value);
  465. auto put_time = timer.ElapsedNanos();
  466. hist_put_time.Add(put_time);
  467. hist_wal_time.Add(get_perf_context()->write_wal_time);
  468. hist_time_diff.Add(put_time - get_perf_context()->write_wal_time);
  469. }
  470. if (FLAGS_verbose) {
  471. std::cout << "Put time:\n" << hist_put_time.ToString() << "WAL time:\n"
  472. << hist_wal_time.ToString() << "time diff:\n"
  473. << hist_time_diff.ToString();
  474. }
  475. HistogramImpl hist_seek;
  476. HistogramImpl hist_next;
  477. for (int i = 0; i < FLAGS_total_keys; ++i) {
  478. std::string key = "k" + ToString(i);
  479. std::string value = "v" + ToString(i);
  480. std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
  481. get_perf_context()->Reset();
  482. iter->Seek(key);
  483. ASSERT_TRUE(iter->Valid());
  484. ASSERT_EQ(iter->value().ToString(), value);
  485. hist_seek.Add(get_perf_context()->user_key_comparison_count);
  486. }
  487. std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
  488. for (iter->SeekToFirst(); iter->Valid();) {
  489. get_perf_context()->Reset();
  490. iter->Next();
  491. hist_next.Add(get_perf_context()->user_key_comparison_count);
  492. }
  493. if (FLAGS_verbose) {
  494. std::cout << "Seek:\n" << hist_seek.ToString() << "Next:\n"
  495. << hist_next.ToString();
  496. }
  497. }
// Blocking on a held InstrumentedMutex must charge the wait to
// perf_context.db_mutex_lock_nanos — but only when the mutex reports
// DB_MUTEX_WAIT_MICROS and the perf level does not exclude mutex timing.
TEST_F(PerfContextTest, DBMutexLockCounter) {
  int stats_code[] = {0, static_cast<int>(DB_MUTEX_WAIT_MICROS)};
  for (PerfLevel perf_level_test :
       {PerfLevel::kEnableTimeExceptForMutex, PerfLevel::kEnableTime}) {
    for (int c = 0; c < 2; ++c) {
      InstrumentedMutex mutex(nullptr, Env::Default(), stats_code[c]);
      mutex.Lock();  // held by the parent so the child thread must wait
      ROCKSDB_NAMESPACE::port::Thread child_thread([&] {
        SetPerfLevel(perf_level_test);  // perf level is thread-local
        get_perf_context()->Reset();
        ASSERT_EQ(get_perf_context()->db_mutex_lock_nanos, 0);
        mutex.Lock();
        mutex.Unlock();
        if (perf_level_test == PerfLevel::kEnableTimeExceptForMutex ||
            stats_code[c] != DB_MUTEX_WAIT_MICROS) {
          ASSERT_EQ(get_perf_context()->db_mutex_lock_nanos, 0);
        } else {
          // increment the counter only when it's a DB Mutex
          ASSERT_GT(get_perf_context()->db_mutex_lock_nanos, 0);
        }
      });
      // Give the child time to block on Lock() before releasing the mutex.
      Env::Default()->SleepForMicroseconds(100);
      mutex.Unlock();
      child_thread.join();
    }
  }
}
  525. TEST_F(PerfContextTest, FalseDBMutexWait) {
  526. SetPerfLevel(kEnableTime);
  527. int stats_code[] = {0, static_cast<int>(DB_MUTEX_WAIT_MICROS)};
  528. for (int c = 0; c < 2; ++c) {
  529. InstrumentedMutex mutex(nullptr, Env::Default(), stats_code[c]);
  530. InstrumentedCondVar lock(&mutex);
  531. get_perf_context()->Reset();
  532. mutex.Lock();
  533. lock.TimedWait(100);
  534. mutex.Unlock();
  535. if (stats_code[c] == static_cast<int>(DB_MUTEX_WAIT_MICROS)) {
  536. // increment the counter only when it's a DB Mutex
  537. ASSERT_GT(get_perf_context()->db_condition_wait_nanos, 0);
  538. } else {
  539. ASSERT_EQ(get_perf_context()->db_condition_wait_nanos, 0);
  540. }
  541. }
  542. }
  543. TEST_F(PerfContextTest, ToString) {
  544. get_perf_context()->Reset();
  545. get_perf_context()->block_read_count = 12345;
  546. std::string zero_included = get_perf_context()->ToString();
  547. ASSERT_NE(std::string::npos, zero_included.find("= 0"));
  548. ASSERT_NE(std::string::npos, zero_included.find("= 12345"));
  549. std::string zero_excluded = get_perf_context()->ToString(true);
  550. ASSERT_EQ(std::string::npos, zero_excluded.find("= 0"));
  551. ASSERT_NE(std::string::npos, zero_excluded.find("= 12345"));
  552. }
  553. TEST_F(PerfContextTest, MergeOperatorTime) {
  554. DestroyDB(kDbName, Options());
  555. DB* db;
  556. Options options;
  557. options.create_if_missing = true;
  558. options.merge_operator = MergeOperators::CreateStringAppendOperator();
  559. Status s = DB::Open(options, kDbName, &db);
  560. EXPECT_OK(s);
  561. std::string val;
  562. ASSERT_OK(db->Merge(WriteOptions(), "k1", "val1"));
  563. ASSERT_OK(db->Merge(WriteOptions(), "k1", "val2"));
  564. ASSERT_OK(db->Merge(WriteOptions(), "k1", "val3"));
  565. ASSERT_OK(db->Merge(WriteOptions(), "k1", "val4"));
  566. SetPerfLevel(kEnableTime);
  567. get_perf_context()->Reset();
  568. ASSERT_OK(db->Get(ReadOptions(), "k1", &val));
  569. #ifdef OS_SOLARIS
  570. for (int i = 0; i < 100; i++) {
  571. ASSERT_OK(db->Get(ReadOptions(), "k1", &val));
  572. }
  573. #endif
  574. EXPECT_GT(get_perf_context()->merge_operator_time_nanos, 0);
  575. ASSERT_OK(db->Flush(FlushOptions()));
  576. get_perf_context()->Reset();
  577. ASSERT_OK(db->Get(ReadOptions(), "k1", &val));
  578. #ifdef OS_SOLARIS
  579. for (int i = 0; i < 100; i++) {
  580. ASSERT_OK(db->Get(ReadOptions(), "k1", &val));
  581. }
  582. #endif
  583. EXPECT_GT(get_perf_context()->merge_operator_time_nanos, 0);
  584. ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  585. get_perf_context()->Reset();
  586. ASSERT_OK(db->Get(ReadOptions(), "k1", &val));
  587. #ifdef OS_SOLARIS
  588. for (int i = 0; i < 100; i++) {
  589. ASSERT_OK(db->Get(ReadOptions(), "k1", &val));
  590. }
  591. #endif
  592. EXPECT_GT(get_perf_context()->merge_operator_time_nanos, 0);
  593. delete db;
  594. }
// Per-level perf-context data must survive copy-assignment, copy
// construction, and move construction independently of the source
// PerfContext being cleared afterwards (i.e. a deep copy, not aliasing).
TEST_F(PerfContextTest, CopyAndMove) {
  // Assignment operator
  {
    get_perf_context()->Reset();
    get_perf_context()->EnablePerLevelPerfContext();
    PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 5);
    ASSERT_EQ(
        1,
        (*(get_perf_context()->level_to_perf_context))[5].bloom_filter_useful);
    PerfContext perf_context_assign;
    perf_context_assign = *get_perf_context();
    ASSERT_EQ(
        1,
        (*(perf_context_assign.level_to_perf_context))[5].bloom_filter_useful);
    // Clearing the source must not affect the assigned-to copy.
    get_perf_context()->ClearPerLevelPerfContext();
    get_perf_context()->Reset();
    ASSERT_EQ(
        1,
        (*(perf_context_assign.level_to_perf_context))[5].bloom_filter_useful);
    perf_context_assign.ClearPerLevelPerfContext();
    perf_context_assign.Reset();
  }
  // Copy constructor
  {
    get_perf_context()->Reset();
    get_perf_context()->EnablePerLevelPerfContext();
    PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 5);
    ASSERT_EQ(
        1,
        (*(get_perf_context()->level_to_perf_context))[5].bloom_filter_useful);
    PerfContext perf_context_copy(*get_perf_context());
    ASSERT_EQ(
        1, (*(perf_context_copy.level_to_perf_context))[5].bloom_filter_useful);
    // Clearing the source must not affect the copy.
    get_perf_context()->ClearPerLevelPerfContext();
    get_perf_context()->Reset();
    ASSERT_EQ(
        1, (*(perf_context_copy.level_to_perf_context))[5].bloom_filter_useful);
    perf_context_copy.ClearPerLevelPerfContext();
    perf_context_copy.Reset();
  }
  // Move constructor
  {
    get_perf_context()->Reset();
    get_perf_context()->EnablePerLevelPerfContext();
    PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 5);
    ASSERT_EQ(
        1,
        (*(get_perf_context()->level_to_perf_context))[5].bloom_filter_useful);
    PerfContext perf_context_move = std::move(*get_perf_context());
    ASSERT_EQ(
        1, (*(perf_context_move.level_to_perf_context))[5].bloom_filter_useful);
    // Clearing the (moved-from) source must not affect the moved-to object.
    get_perf_context()->ClearPerLevelPerfContext();
    get_perf_context()->Reset();
    ASSERT_EQ(
        1, (*(perf_context_move.level_to_perf_context))[5].bloom_filter_useful);
    perf_context_move.ClearPerLevelPerfContext();
    perf_context_move.Reset();
  }
}
// Counters bumped while per-level tracking is disabled must not be
// recorded; those bumped while enabled must survive into a copy.
TEST_F(PerfContextTest, PerfContextDisableEnable) {
  get_perf_context()->Reset();
  get_perf_context()->EnablePerLevelPerfContext();
  PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_positive, 1, 0);  // enabled
  get_perf_context()->DisablePerLevelPerfContext();
  PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 5);  // disabled: dropped
  get_perf_context()->EnablePerLevelPerfContext();
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_hit_count, 1, 0);  // enabled
  get_perf_context()->DisablePerLevelPerfContext();
  PerfContext perf_context_copy(*get_perf_context());
  ASSERT_EQ(1, (*(perf_context_copy.level_to_perf_context))[0]
                   .bloom_filter_full_positive);
  // this was set when per level perf context is disabled, should not be copied
  ASSERT_NE(
      1, (*(perf_context_copy.level_to_perf_context))[5].bloom_filter_useful);
  ASSERT_EQ(
      1, (*(perf_context_copy.level_to_perf_context))[0].block_cache_hit_count);
  perf_context_copy.ClearPerLevelPerfContext();
  perf_context_copy.Reset();
  get_perf_context()->ClearPerLevelPerfContext();
  get_perf_context()->Reset();
}
// Per-level counters must accumulate independently per level, and
// ToString(true) must render them as "value@levelN" lists sorted by level.
TEST_F(PerfContextTest, PerfContextByLevelGetSet) {
  get_perf_context()->Reset();
  get_perf_context()->EnablePerLevelPerfContext();
  PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_positive, 1, 0);
  PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 5);
  PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 7);  // level 7 bumped twice
  PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 7);
  PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_true_positive, 1, 2);
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_hit_count, 1, 0);
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_hit_count, 5, 2);
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_miss_count, 2, 3);
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_miss_count, 4, 1);
  ASSERT_EQ(
      0, (*(get_perf_context()->level_to_perf_context))[0].bloom_filter_useful);
  ASSERT_EQ(
      1, (*(get_perf_context()->level_to_perf_context))[5].bloom_filter_useful);
  ASSERT_EQ(
      2, (*(get_perf_context()->level_to_perf_context))[7].bloom_filter_useful);
  ASSERT_EQ(1, (*(get_perf_context()->level_to_perf_context))[0]
                   .bloom_filter_full_positive);
  ASSERT_EQ(1, (*(get_perf_context()->level_to_perf_context))[2]
                   .bloom_filter_full_true_positive);
  ASSERT_EQ(1, (*(get_perf_context()->level_to_perf_context))[0]
                   .block_cache_hit_count);
  ASSERT_EQ(5, (*(get_perf_context()->level_to_perf_context))[2]
                   .block_cache_hit_count);
  ASSERT_EQ(2, (*(get_perf_context()->level_to_perf_context))[3]
                   .block_cache_miss_count);
  ASSERT_EQ(4, (*(get_perf_context()->level_to_perf_context))[1]
                   .block_cache_miss_count);
  // String form: zero-suppressed, with per-level breakdown per counter.
  std::string zero_excluded = get_perf_context()->ToString(true);
  ASSERT_NE(std::string::npos,
            zero_excluded.find("bloom_filter_useful = 1@level5, 2@level7"));
  ASSERT_NE(std::string::npos,
            zero_excluded.find("bloom_filter_full_positive = 1@level0"));
  ASSERT_NE(std::string::npos,
            zero_excluded.find("bloom_filter_full_true_positive = 1@level2"));
  ASSERT_NE(std::string::npos,
            zero_excluded.find("block_cache_hit_count = 1@level0, 5@level2"));
  ASSERT_NE(std::string::npos,
            zero_excluded.find("block_cache_miss_count = 4@level1, 2@level3"));
}
  718. TEST_F(PerfContextTest, CPUTimer) {
  719. DestroyDB(kDbName, Options());
  720. auto db = OpenDb();
  721. WriteOptions write_options;
  722. ReadOptions read_options;
  723. SetPerfLevel(PerfLevel::kEnableTimeAndCPUTimeExceptForMutex);
  724. std::string max_str = "0";
  725. for (int i = 0; i < FLAGS_total_keys; ++i) {
  726. std::string i_str = ToString(i);
  727. std::string key = "k" + i_str;
  728. std::string value = "v" + i_str;
  729. max_str = max_str > i_str ? max_str : i_str;
  730. db->Put(write_options, key, value);
  731. }
  732. std::string last_key = "k" + max_str;
  733. std::string last_value = "v" + max_str;
  734. {
  735. // Get
  736. get_perf_context()->Reset();
  737. std::string value;
  738. ASSERT_OK(db->Get(read_options, "k0", &value));
  739. ASSERT_EQ(value, "v0");
  740. if (FLAGS_verbose) {
  741. std::cout << "Get CPU time nanos: " << get_perf_context()->get_cpu_nanos
  742. << "ns\n";
  743. }
  744. // Iter
  745. std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
  746. // Seek
  747. get_perf_context()->Reset();
  748. iter->Seek(last_key);
  749. ASSERT_TRUE(iter->Valid());
  750. ASSERT_EQ(last_value, iter->value().ToString());
  751. if (FLAGS_verbose) {
  752. std::cout << "Iter Seek CPU time nanos: "
  753. << get_perf_context()->iter_seek_cpu_nanos << "ns\n";
  754. }
  755. // SeekForPrev
  756. get_perf_context()->Reset();
  757. iter->SeekForPrev(last_key);
  758. ASSERT_TRUE(iter->Valid());
  759. if (FLAGS_verbose) {
  760. std::cout << "Iter SeekForPrev CPU time nanos: "
  761. << get_perf_context()->iter_seek_cpu_nanos << "ns\n";
  762. }
  763. // SeekToLast
  764. get_perf_context()->Reset();
  765. iter->SeekToLast();
  766. ASSERT_TRUE(iter->Valid());
  767. ASSERT_EQ(last_value, iter->value().ToString());
  768. if (FLAGS_verbose) {
  769. std::cout << "Iter SeekToLast CPU time nanos: "
  770. << get_perf_context()->iter_seek_cpu_nanos << "ns\n";
  771. }
  772. // SeekToFirst
  773. get_perf_context()->Reset();
  774. iter->SeekToFirst();
  775. ASSERT_TRUE(iter->Valid());
  776. ASSERT_EQ("v0", iter->value().ToString());
  777. if (FLAGS_verbose) {
  778. std::cout << "Iter SeekToFirst CPU time nanos: "
  779. << get_perf_context()->iter_seek_cpu_nanos << "ns\n";
  780. }
  781. // Next
  782. get_perf_context()->Reset();
  783. iter->Next();
  784. ASSERT_TRUE(iter->Valid());
  785. ASSERT_EQ("v1", iter->value().ToString());
  786. if (FLAGS_verbose) {
  787. std::cout << "Iter Next CPU time nanos: "
  788. << get_perf_context()->iter_next_cpu_nanos << "ns\n";
  789. }
  790. // Prev
  791. get_perf_context()->Reset();
  792. iter->Prev();
  793. ASSERT_TRUE(iter->Valid());
  794. ASSERT_EQ("v0", iter->value().ToString());
  795. if (FLAGS_verbose) {
  796. std::cout << "Iter Prev CPU time nanos: "
  797. << get_perf_context()->iter_prev_cpu_nanos << "ns\n";
  798. }
  799. // monotonically increasing
  800. get_perf_context()->Reset();
  801. auto count = get_perf_context()->iter_seek_cpu_nanos;
  802. for (int i = 0; i < FLAGS_total_keys; ++i) {
  803. iter->Seek("k" + ToString(i));
  804. ASSERT_TRUE(iter->Valid());
  805. ASSERT_EQ("v" + ToString(i), iter->value().ToString());
  806. auto next_count = get_perf_context()->iter_seek_cpu_nanos;
  807. ASSERT_GT(next_count, count);
  808. count = next_count;
  809. }
  810. // iterator creation/destruction; multiple iterators
  811. {
  812. std::unique_ptr<Iterator> iter2(db->NewIterator(read_options));
  813. ASSERT_EQ(count, get_perf_context()->iter_seek_cpu_nanos);
  814. iter2->Seek(last_key);
  815. ASSERT_TRUE(iter2->Valid());
  816. ASSERT_EQ(last_value, iter2->value().ToString());
  817. ASSERT_GT(get_perf_context()->iter_seek_cpu_nanos, count);
  818. count = get_perf_context()->iter_seek_cpu_nanos;
  819. }
  820. ASSERT_EQ(count, get_perf_context()->iter_seek_cpu_nanos);
  821. }
  822. }
  823. } // namespace ROCKSDB_NAMESPACE
  824. int main(int argc, char** argv) {
  825. ::testing::InitGoogleTest(&argc, argv);
  826. for (int i = 1; i < argc; i++) {
  827. int n;
  828. char junk;
  829. if (sscanf(argv[i], "--write_buffer_size=%d%c", &n, &junk) == 1) {
  830. FLAGS_write_buffer_size = n;
  831. }
  832. if (sscanf(argv[i], "--total_keys=%d%c", &n, &junk) == 1) {
  833. FLAGS_total_keys = n;
  834. }
  835. if (sscanf(argv[i], "--random_key=%d%c", &n, &junk) == 1 &&
  836. (n == 0 || n == 1)) {
  837. FLAGS_random_key = n;
  838. }
  839. if (sscanf(argv[i], "--use_set_based_memetable=%d%c", &n, &junk) == 1 &&
  840. (n == 0 || n == 1)) {
  841. FLAGS_use_set_based_memetable = n;
  842. }
  843. if (sscanf(argv[i], "--verbose=%d%c", &n, &junk) == 1 &&
  844. (n == 0 || n == 1)) {
  845. FLAGS_verbose = n;
  846. }
  847. }
  848. if (FLAGS_verbose) {
  849. std::cout << kDbName << "\n";
  850. }
  851. return RUN_ALL_TESTS();
  852. }