db_basic_bench.cc 51 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620
  1. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
  2. // This source code is licensed under both the GPLv2 (found in the
  3. // COPYING file in the root directory) and Apache 2.0 License
  4. // (found in the LICENSE.Apache file in the root directory).
  5. #ifndef OS_WIN
  6. #include <unistd.h>
  7. #endif // ! OS_WIN
  8. #include "benchmark/benchmark.h"
  9. #include "db/db_impl/db_impl.h"
  10. #include "rocksdb/db.h"
  11. #include "rocksdb/filter_policy.h"
  12. #include "rocksdb/options.h"
  13. #include "table/block_based/block.h"
  14. #include "table/block_based/block_builder.h"
  15. #include "util/random.h"
  16. #include "utilities/merge_operators.h"
  17. namespace ROCKSDB_NAMESPACE {
// Generates keys for the benchmarks below. Two modes:
//  - random: ctor taking Random* draws the key index from rnd % max_key
//  - sequential: ctor without Random* wraps the index around at max_key
// The key index is encoded as index * MULTIPLIER + offset in 4 bytes
// (most-significant byte first on little-endian targets), optionally
// preceded by a 4-byte prefix group, then zero-padded to key_size_.
class KeyGenerator {
 public:
  // Generate next key
  // buff: the caller needs to make sure there's enough space for generated key
  // offset: to control the group of the key, 0 means normal key, 1 means
  // non-existing key, 2 is reserved prefix_only: only return a prefix
  Slice Next(char* buff, int8_t offset = 0, bool prefix_only = false) {
    // Guard against k overflowing uint32_t when multiplied below.
    assert(max_key_ < std::numeric_limits<uint32_t>::max() /
                          MULTIPLIER);  // TODO: add large key support
    uint32_t k;
    if (is_sequential_) {
      assert(next_sequential_key_ < max_key_);
      k = (next_sequential_key_ % max_key_) * MULTIPLIER + offset;
      // Wrap back to 0 after emitting max_key_ keys.
      if (next_sequential_key_ + 1 == max_key_) {
        next_sequential_key_ = 0;
      } else {
        next_sequential_key_++;
      }
    } else {
      k = (rnd_->Next() % max_key_) * MULTIPLIER + offset;
    }
    // TODO: make sure the buff is large enough
    memset(buff, 0, key_size_);
    if (prefix_num_ > 0) {
      // Prefix group shares the same offset scheme as the key itself.
      uint32_t prefix = (k % prefix_num_) * MULTIPLIER + offset;
      Encode(buff, prefix);
      if (prefix_only) {
        return {buff, prefix_size_};
      }
    }
    Encode(buff + prefix_size_, k);
    return {buff, key_size_};
  }

  // use internal buffer for generated key, make sure there's only one caller in
  // single thread
  Slice Next() { return Next(buff_); }

  // user internal buffer for generated prefix
  Slice NextPrefix() {
    assert(prefix_num_ > 0);
    return Next(buff_, 0, true);
  }

  // helper function to get non exist key (offset group 1 is never written)
  Slice NextNonExist() { return Next(buff_, 1); }

  // Fills buff with 0xff and returns it; sorts after every generated key.
  Slice MaxKey(char* buff) const {
    memset(buff, 0xff, key_size_);
    return {buff, key_size_};
  }

  // Fills buff with 0x00 and returns it; sorts before every generated key.
  Slice MinKey(char* buff) const {
    memset(buff, 0, key_size_);
    return {buff, key_size_};
  }

  // Random-mode generator.
  // max_key: the max key that it could generate
  // prefix_num: the max prefix number
  // key_size: in bytes
  explicit KeyGenerator(Random* rnd, uint64_t max_key = 100 * 1024 * 1024,
                        size_t prefix_num = 0, size_t key_size = 10) {
    prefix_num_ = prefix_num;
    key_size_ = key_size;
    max_key_ = max_key;
    rnd_ = rnd;
    if (prefix_num > 0) {
      prefix_size_ = 4;  // TODO: support different prefix_size
    }
  }

  // generate sequential keys
  explicit KeyGenerator(uint64_t max_key = 100 * 1024 * 1024,
                        size_t key_size = 10) {
    key_size_ = key_size;
    max_key_ = max_key;
    rnd_ = nullptr;
    is_sequential_ = true;
  }

 private:
  Random* rnd_;
  size_t prefix_num_ = 0;
  size_t prefix_size_ = 0;
  size_t key_size_;
  uint64_t max_key_;
  bool is_sequential_ = false;
  uint32_t next_sequential_key_ = 0;
  // Scratch buffer used by the no-argument Next()/NextPrefix()/NextNonExist()
  // overloads; not thread-safe (see comment on Next() above).
  char buff_[256] = {0};
  const int MULTIPLIER = 3;

  // Writes `value` into buf[0..3]. On little-endian targets the bytes are
  // stored most-significant first so that memcmp order matches numeric
  // order; on big-endian targets a plain memcpy already has that property.
  void static Encode(char* buf, uint32_t value) {
    if (port::kLittleEndian) {
      buf[0] = static_cast<char>((value >> 24) & 0xff);
      buf[1] = static_cast<char>((value >> 16) & 0xff);
      buf[2] = static_cast<char>((value >> 8) & 0xff);
      buf[3] = static_cast<char>(value & 0xff);
    } else {
      memcpy(buf, &value, sizeof(value));
    }
  }
};
  111. static void SetupDB(benchmark::State& state, Options& options,
  112. std::unique_ptr<DB>* db,
  113. const std::string& test_name = "") {
  114. options.create_if_missing = true;
  115. auto env = Env::Default();
  116. std::string db_path;
  117. Status s = env->GetTestDirectory(&db_path);
  118. if (!s.ok()) {
  119. state.SkipWithError(s.ToString().c_str());
  120. return;
  121. }
  122. std::string db_name =
  123. db_path + kFilePathSeparator + test_name + std::to_string(getpid());
  124. DestroyDB(db_name, options);
  125. DB* db_ptr = nullptr;
  126. s = DB::Open(options, db_name, &db_ptr);
  127. if (!s.ok()) {
  128. state.SkipWithError(s.ToString().c_str());
  129. return;
  130. }
  131. db->reset(db_ptr);
  132. }
  133. static void TeardownDB(benchmark::State& state, const std::unique_ptr<DB>& db,
  134. const Options& options, KeyGenerator& kg) {
  135. char min_buff[256], max_buff[256];
  136. const Range r(kg.MinKey(min_buff), kg.MaxKey(max_buff));
  137. uint64_t size;
  138. Status s = db->GetApproximateSizes(&r, 1, &size);
  139. if (!s.ok()) {
  140. state.SkipWithError(s.ToString().c_str());
  141. }
  142. state.counters["db_size"] = static_cast<double>(size);
  143. std::string db_name = db->GetName();
  144. s = db->Close();
  145. if (!s.ok()) {
  146. state.SkipWithError(s.ToString().c_str());
  147. }
  148. DestroyDB(db_name, options);
  149. }
  150. static void DBOpen(benchmark::State& state) {
  151. // create DB
  152. std::unique_ptr<DB> db;
  153. Options options;
  154. SetupDB(state, options, &db, "DBOpen");
  155. std::string db_name = db->GetName();
  156. db->Close();
  157. options.create_if_missing = false;
  158. auto rnd = Random(123);
  159. for (auto _ : state) {
  160. {
  161. DB* db_ptr = nullptr;
  162. Status s = DB::Open(options, db_name, &db_ptr);
  163. if (!s.ok()) {
  164. state.SkipWithError(s.ToString().c_str());
  165. }
  166. db.reset(db_ptr);
  167. }
  168. state.PauseTiming();
  169. auto wo = WriteOptions();
  170. Status s;
  171. for (int i = 0; i < 2; i++) {
  172. for (int j = 0; j < 100; j++) {
  173. s = db->Put(wo, rnd.RandomString(10), rnd.RandomString(100));
  174. if (!s.ok()) {
  175. state.SkipWithError(s.ToString().c_str());
  176. }
  177. }
  178. s = db->Flush(FlushOptions());
  179. }
  180. if (!s.ok()) {
  181. state.SkipWithError(s.ToString().c_str());
  182. }
  183. s = db->Close();
  184. if (!s.ok()) {
  185. state.SkipWithError(s.ToString().c_str());
  186. }
  187. state.ResumeTiming();
  188. }
  189. DestroyDB(db_name, options);
  190. }
// Iteration count is pinned: each iteration grows the DB, so a variable
// count would change what is being measured.
BENCHMARK(DBOpen)->Iterations(200);  // specify iteration number as the db size
                                     // is impacted by iteration number
  193. static void DBClose(benchmark::State& state) {
  194. // create DB
  195. std::unique_ptr<DB> db;
  196. Options options;
  197. SetupDB(state, options, &db, "DBClose");
  198. std::string db_name = db->GetName();
  199. db->Close();
  200. options.create_if_missing = false;
  201. auto rnd = Random(12345);
  202. for (auto _ : state) {
  203. state.PauseTiming();
  204. {
  205. DB* db_ptr = nullptr;
  206. Status s = DB::Open(options, db_name, &db_ptr);
  207. if (!s.ok()) {
  208. state.SkipWithError(s.ToString().c_str());
  209. }
  210. db.reset(db_ptr);
  211. }
  212. auto wo = WriteOptions();
  213. Status s;
  214. for (int i = 0; i < 2; i++) {
  215. for (int j = 0; j < 100; j++) {
  216. s = db->Put(wo, rnd.RandomString(10), rnd.RandomString(100));
  217. if (!s.ok()) {
  218. state.SkipWithError(s.ToString().c_str());
  219. }
  220. }
  221. s = db->Flush(FlushOptions());
  222. }
  223. if (!s.ok()) {
  224. state.SkipWithError(s.ToString().c_str());
  225. }
  226. state.ResumeTiming();
  227. s = db->Close();
  228. if (!s.ok()) {
  229. state.SkipWithError(s.ToString().c_str());
  230. }
  231. }
  232. DestroyDB(db_name, options);
  233. }
// Iteration count is pinned: each iteration grows the DB, so a variable
// count would change what is being measured.
BENCHMARK(DBClose)->Iterations(200);  // specify iteration number as the db size
                                      // is impacted by iteration number
// Benchmark DB::Put() latency/throughput.
// Args: {comp_style, max_data, per_key_size, enable_statistics, wal}
// (see DBPutArguments). Thread 0 performs DB setup and teardown; all
// benchmark threads share the same `static` DB. Key and value generation
// happen with the timer paused so only the Put itself is measured.
static void DBPut(benchmark::State& state) {
  auto compaction_style = static_cast<CompactionStyle>(state.range(0));
  uint64_t max_data = state.range(1);
  uint64_t per_key_size = state.range(2);
  bool enable_statistics = state.range(3);
  bool enable_wal = state.range(4);
  // Key space sized so that max_data bytes of values fit once each.
  uint64_t key_num = max_data / per_key_size;
  // setup DB
  static std::unique_ptr<DB> db = nullptr;  // shared across benchmark threads
  Options options;
  if (enable_statistics) {
    options.statistics = CreateDBStatistics();
  }
  options.compaction_style = compaction_style;
  // Per-thread RNG seed so threads don't replay the same key sequence.
  auto rnd = Random(301 + state.thread_index());
  KeyGenerator kg(&rnd, key_num);
  if (state.thread_index() == 0) {
    SetupDB(state, options, &db, "DBPut");
  }
  auto wo = WriteOptions();
  wo.disableWAL = !enable_wal;
  for (auto _ : state) {
    state.PauseTiming();
    Slice key = kg.Next();
    std::string val = rnd.RandomString(static_cast<int>(per_key_size));
    state.ResumeTiming();
    Status s = db->Put(wo, key, val);
    if (!s.ok()) {
      state.SkipWithError(s.ToString().c_str());
    }
  }
  if (state.thread_index() == 0) {
    // Wait for background compaction so the db_size counter reported by
    // TeardownDB is stable across runs.
    auto db_full = static_cast_with_check<DBImpl>(db.get());
    Status s = db_full->WaitForCompact(WaitForCompactOptions());
    if (!s.ok()) {
      state.SkipWithError(s.ToString().c_str());
      return;
    }
    if (enable_statistics) {
      // DB_WRITE histogram values scaled by std::milli::den (1000) for
      // reporting, matching the other benchmarks in this file.
      HistogramData histogram_data;
      options.statistics->histogramData(DB_WRITE, &histogram_data);
      state.counters["put_mean"] = histogram_data.average * std::milli::den;
      state.counters["put_p95"] = histogram_data.percentile95 * std::milli::den;
      state.counters["put_p99"] = histogram_data.percentile99 * std::milli::den;
    }
    TeardownDB(state, db, options, kg);
  }
}
  284. static void DBPutArguments(benchmark::internal::Benchmark* b) {
  285. for (int comp_style : {kCompactionStyleLevel, kCompactionStyleUniversal,
  286. kCompactionStyleFIFO}) {
  287. for (int64_t max_data : {100l << 30}) {
  288. for (int64_t per_key_size : {256, 1024}) {
  289. for (bool enable_statistics : {false, true}) {
  290. for (bool wal : {false, true}) {
  291. b->Args(
  292. {comp_style, max_data, per_key_size, enable_statistics, wal});
  293. }
  294. }
  295. }
  296. }
  297. }
  298. b->ArgNames(
  299. {"comp_style", "max_data", "per_key_size", "enable_statistics", "wal"});
  300. }
  301. static const uint64_t DBPutNum = 409600l;
  302. BENCHMARK(DBPut)->Threads(1)->Iterations(DBPutNum)->Apply(DBPutArguments);
  303. BENCHMARK(DBPut)->Threads(8)->Iterations(DBPutNum / 8)->Apply(DBPutArguments);
  304. static void ManualCompaction(benchmark::State& state) {
  305. auto compaction_style = static_cast<CompactionStyle>(state.range(0));
  306. uint64_t max_data = state.range(1);
  307. uint64_t per_key_size = state.range(2);
  308. bool enable_statistics = state.range(3);
  309. uint64_t key_num = max_data / per_key_size;
  310. // setup DB
  311. static std::unique_ptr<DB> db;
  312. Options options;
  313. if (enable_statistics) {
  314. options.statistics = CreateDBStatistics();
  315. }
  316. options.compaction_style = compaction_style;
  317. // No auto compaction
  318. options.disable_auto_compactions = true;
  319. options.level0_file_num_compaction_trigger = (1 << 30);
  320. options.level0_slowdown_writes_trigger = (1 << 30);
  321. options.level0_stop_writes_trigger = (1 << 30);
  322. options.soft_pending_compaction_bytes_limit = 0;
  323. options.hard_pending_compaction_bytes_limit = 0;
  324. auto rnd = Random(301 + state.thread_index());
  325. KeyGenerator kg(&rnd, key_num);
  326. if (state.thread_index() == 0) {
  327. SetupDB(state, options, &db, "ManualCompaction");
  328. }
  329. auto wo = WriteOptions();
  330. wo.disableWAL = true;
  331. uint64_t flush_mod = key_num / 4; // at least generate 4 files for compaction
  332. for (uint64_t i = 0; i < key_num; i++) {
  333. Status s = db->Put(wo, kg.Next(),
  334. rnd.RandomString(static_cast<int>(per_key_size)));
  335. if (!s.ok()) {
  336. state.SkipWithError(s.ToString().c_str());
  337. }
  338. if (i + 1 % flush_mod == 0) {
  339. s = db->Flush(FlushOptions());
  340. }
  341. }
  342. FlushOptions fo;
  343. Status s = db->Flush(fo);
  344. if (!s.ok()) {
  345. state.SkipWithError(s.ToString().c_str());
  346. }
  347. std::vector<LiveFileMetaData> files_meta;
  348. db->GetLiveFilesMetaData(&files_meta);
  349. std::vector<std::string> files_before_compact;
  350. files_before_compact.reserve(files_meta.size());
  351. for (const LiveFileMetaData& file : files_meta) {
  352. files_before_compact.emplace_back(file.name);
  353. }
  354. SetPerfLevel(kEnableTime);
  355. get_perf_context()->EnablePerLevelPerfContext();
  356. get_perf_context()->Reset();
  357. CompactionOptions co;
  358. for (auto _ : state) {
  359. s = db->CompactFiles(co, files_before_compact, 1);
  360. if (!s.ok()) {
  361. state.SkipWithError(s.ToString().c_str());
  362. }
  363. }
  364. if (state.thread_index() == 0) {
  365. auto db_full = static_cast_with_check<DBImpl>(db.get());
  366. s = db_full->WaitForCompact(WaitForCompactOptions());
  367. if (!s.ok()) {
  368. state.SkipWithError(s.ToString().c_str());
  369. return;
  370. }
  371. if (enable_statistics) {
  372. HistogramData histogram_data;
  373. options.statistics->histogramData(COMPACTION_TIME, &histogram_data);
  374. state.counters["comp_time"] = histogram_data.average;
  375. options.statistics->histogramData(COMPACTION_CPU_TIME, &histogram_data);
  376. state.counters["comp_cpu_time"] = histogram_data.average;
  377. options.statistics->histogramData(COMPACTION_OUTFILE_SYNC_MICROS,
  378. &histogram_data);
  379. state.counters["comp_outfile_sync"] = histogram_data.average;
  380. state.counters["comp_read"] = static_cast<double>(
  381. options.statistics->getTickerCount(COMPACT_READ_BYTES));
  382. state.counters["comp_write"] = static_cast<double>(
  383. options.statistics->getTickerCount(COMPACT_WRITE_BYTES));
  384. state.counters["user_key_comparison_count"] =
  385. static_cast<double>(get_perf_context()->user_key_comparison_count);
  386. state.counters["block_read_count"] =
  387. static_cast<double>(get_perf_context()->block_read_count);
  388. state.counters["block_read_time"] =
  389. static_cast<double>(get_perf_context()->block_read_time);
  390. state.counters["block_read_cpu_time"] =
  391. static_cast<double>(get_perf_context()->block_read_cpu_time);
  392. state.counters["block_checksum_time"] =
  393. static_cast<double>(get_perf_context()->block_checksum_time);
  394. state.counters["new_table_block_iter_nanos"] =
  395. static_cast<double>(get_perf_context()->new_table_block_iter_nanos);
  396. state.counters["new_table_iterator_nanos"] =
  397. static_cast<double>(get_perf_context()->new_table_iterator_nanos);
  398. state.counters["find_table_nanos"] =
  399. static_cast<double>(get_perf_context()->find_table_nanos);
  400. }
  401. TeardownDB(state, db, options, kg);
  402. }
  403. }
  404. static void ManualCompactionArguments(benchmark::internal::Benchmark* b) {
  405. for (int comp_style : {kCompactionStyleLevel, kCompactionStyleUniversal}) {
  406. for (int64_t max_data : {32l << 20, 128l << 20}) {
  407. for (int64_t per_key_size : {256, 1024}) {
  408. for (bool enable_statistics : {false, true}) {
  409. b->Args({comp_style, max_data, per_key_size, enable_statistics});
  410. }
  411. }
  412. }
  413. }
  414. b->ArgNames({"comp_style", "max_data", "per_key_size", "enable_statistics"});
  415. }
  416. BENCHMARK(ManualCompaction)->Iterations(1)->Apply(ManualCompactionArguments);
  417. static void ManualFlush(benchmark::State& state) {
  418. uint64_t key_num = state.range(0);
  419. uint64_t per_key_size = state.range(1);
  420. bool enable_statistics = true;
  421. // setup DB
  422. static std::unique_ptr<DB> db;
  423. Options options;
  424. if (enable_statistics) {
  425. options.statistics = CreateDBStatistics();
  426. }
  427. options.disable_auto_compactions = true;
  428. options.level0_file_num_compaction_trigger = (1 << 30);
  429. options.level0_slowdown_writes_trigger = (1 << 30);
  430. options.level0_stop_writes_trigger = (1 << 30);
  431. options.soft_pending_compaction_bytes_limit = 0;
  432. options.hard_pending_compaction_bytes_limit = 0;
  433. options.write_buffer_size = 2l << 30; // 2G to avoid auto flush
  434. auto rnd = Random(301 + state.thread_index());
  435. KeyGenerator kg(&rnd, key_num);
  436. if (state.thread_index() == 0) {
  437. SetupDB(state, options, &db, "ManualFlush");
  438. }
  439. auto wo = WriteOptions();
  440. for (auto _ : state) {
  441. state.PauseTiming();
  442. for (uint64_t i = 0; i < key_num; i++) {
  443. Status s = db->Put(wo, kg.Next(),
  444. rnd.RandomString(static_cast<int>(per_key_size)));
  445. }
  446. FlushOptions fo;
  447. state.ResumeTiming();
  448. Status s = db->Flush(fo);
  449. if (!s.ok()) {
  450. state.SkipWithError(s.ToString().c_str());
  451. }
  452. }
  453. if (state.thread_index() == 0) {
  454. auto db_full = static_cast_with_check<DBImpl>(db.get());
  455. Status s = db_full->WaitForCompact(WaitForCompactOptions());
  456. if (!s.ok()) {
  457. state.SkipWithError(s.ToString().c_str());
  458. return;
  459. }
  460. if (enable_statistics) {
  461. HistogramData histogram_data;
  462. options.statistics->histogramData(FLUSH_TIME, &histogram_data);
  463. state.counters["flush_time"] = histogram_data.average;
  464. state.counters["flush_write_bytes"] = static_cast<double>(
  465. options.statistics->getTickerCount(FLUSH_WRITE_BYTES));
  466. }
  467. TeardownDB(state, db, options, kg);
  468. }
  469. }
  470. static void ManualFlushArguments(benchmark::internal::Benchmark* b) {
  471. for (int64_t key_num : {1l << 10, 8l << 10, 64l << 10}) {
  472. for (int64_t per_key_size : {256, 1024}) {
  473. b->Args({key_num, per_key_size});
  474. }
  475. }
  476. b->ArgNames({"key_num", "per_key_size"});
  477. }
  478. BENCHMARK(ManualFlush)->Iterations(1)->Apply(ManualFlushArguments);
  479. // Copied from test_util.cc to not depend on rocksdb_test_lib
  480. // when building microbench binaries.
  481. static Slice CompressibleString(Random* rnd, double compressed_fraction,
  482. int len, std::string* dst) {
  483. int raw = static_cast<int>(len * compressed_fraction);
  484. if (raw < 1) {
  485. raw = 1;
  486. }
  487. std::string raw_data = rnd->RandomBinaryString(raw);
  488. // Duplicate the random data until we have filled "len" bytes
  489. dst->clear();
  490. while (dst->size() < (unsigned int)len) {
  491. dst->append(raw_data);
  492. }
  493. dst->resize(len);
  494. return Slice(*dst);
  495. }
// Benchmark DB::Get() against a fully-loaded, fully-compacted DB.
// Args: {comp_style, max_data, per_key_size, enable_statistics,
//        negative_query, enable_filter, mmap, compression_type,
//        compression_checksum, no_blockcache} (see DBGetArguments).
// Thread 0 loads every key in the key space sequentially and compacts the
// whole range; all threads then issue random point lookups, either for
// existing keys or (negative_query) for keys that were never written.
static void DBGet(benchmark::State& state) {
  auto compaction_style = static_cast<CompactionStyle>(state.range(0));
  uint64_t max_data = state.range(1);
  uint64_t per_key_size = state.range(2);
  bool enable_statistics = state.range(3);
  bool negative_query = state.range(4);
  bool enable_filter = state.range(5);
  bool mmap = state.range(6);
  auto compression_type = static_cast<CompressionType>(state.range(7));
  bool compression_checksum = static_cast<bool>(state.range(8));
  bool no_blockcache = state.range(9);
  uint64_t key_num = max_data / per_key_size;

  // setup DB
  static std::unique_ptr<DB> db;  // shared across benchmark threads
  Options options;
  if (enable_statistics) {
    options.statistics = CreateDBStatistics();
  }
  if (mmap) {
    options.allow_mmap_reads = true;
    options.compression = kNoCompression;
  }
  options.compaction_style = compaction_style;

  BlockBasedTableOptions table_options;
  if (enable_filter) {
    table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
  }
  if (mmap) {
    table_options.no_block_cache = true;
    table_options.block_restart_interval = 1;
  }
  // NOTE(review): this unconditionally overwrites the kNoCompression set in
  // the mmap branch above, so mmap runs still use `compression_type` —
  // confirm whether mmap was meant to force uncompressed data.
  options.compression = compression_type;
  options.compression_opts.checksum = compression_checksum;
  if (no_blockcache) {
    table_options.no_block_cache = true;
  } else {
    table_options.block_cache = NewLRUCache(100 << 20);  // 100MB block cache
  }
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  auto rnd = Random(301 + state.thread_index());
  if (state.thread_index() == 0) {
    // Sequential generator covers every key index exactly once per cycle.
    KeyGenerator kg_seq(key_num /* max_key */);
    SetupDB(state, options, &db, "DBGet");
    // Load all valid keys into DB. That way, iterations in `!negative_query`
    // runs can always find the key even though it is generated from a random
    // number.
    auto wo = WriteOptions();
    wo.disableWAL = true;
    std::string val;
    for (uint64_t i = 0; i < key_num; i++) {
      CompressibleString(&rnd, 0.5, static_cast<int>(per_key_size), &val);
      Status s = db->Put(wo, kg_seq.Next(), val);
      if (!s.ok()) {
        state.SkipWithError(s.ToString().c_str());
      }
    }
    // Compact whole DB into one level, so each iteration will consider the same
    // number of files (one).
    Status s = db->CompactRange(CompactRangeOptions(), nullptr /* begin */,
                                nullptr /* end */);
    if (!s.ok()) {
      state.SkipWithError(s.ToString().c_str());
    }
  }

  KeyGenerator kg_rnd(&rnd, key_num /* max_key */);
  auto ro = ReadOptions();
  if (mmap) {
    ro.verify_checksums = false;
  }
  size_t not_found = 0;
  // Two separate loops so the negative_query check is outside the hot path.
  if (negative_query) {
    for (auto _ : state) {
      std::string val;
      Status s = db->Get(ro, kg_rnd.NextNonExist(), &val);
      if (s.IsNotFound()) {
        not_found++;
      }
    }
  } else {
    for (auto _ : state) {
      std::string val;
      Status s = db->Get(ro, kg_rnd.Next(), &val);
      if (s.IsNotFound()) {
        not_found++;
      }
    }
  }
  // Percentage of lookups that missed, averaged over iterations.
  state.counters["neg_qu_pct"] = benchmark::Counter(
      static_cast<double>(not_found * 100), benchmark::Counter::kAvgIterations);

  if (state.thread_index() == 0) {
    if (enable_statistics) {
      // DB_GET histogram values scaled by std::milli::den (1000) for
      // reporting, matching the other benchmarks in this file.
      HistogramData histogram_data;
      options.statistics->histogramData(DB_GET, &histogram_data);
      state.counters["get_mean"] = histogram_data.average * std::milli::den;
      state.counters["get_p95"] = histogram_data.percentile95 * std::milli::den;
      state.counters["get_p99"] = histogram_data.percentile99 * std::milli::den;
    }
    TeardownDB(state, db, options, kg_rnd);
  }
}
  596. static void DBGetArguments(benchmark::internal::Benchmark* b) {
  597. for (int comp_style : {kCompactionStyleLevel, kCompactionStyleUniversal,
  598. kCompactionStyleFIFO}) {
  599. for (int64_t max_data : {1l << 20, 128l << 20, 512l << 20}) {
  600. for (int64_t per_key_size : {256, 1024}) {
  601. for (bool enable_statistics : {false, true}) {
  602. for (bool negative_query : {false, true}) {
  603. for (bool enable_filter : {false, true}) {
  604. for (bool mmap : {false, true}) {
  605. for (int compression_type :
  606. {kNoCompression /* 0x0 */, kZSTD /* 0x7 */}) {
  607. for (bool compression_checksum : {false, true}) {
  608. for (bool no_blockcache : {false, true}) {
  609. b->Args({comp_style, max_data, per_key_size,
  610. enable_statistics, negative_query, enable_filter,
  611. mmap, compression_type, compression_checksum,
  612. no_blockcache});
  613. }
  614. }
  615. }
  616. }
  617. }
  618. }
  619. }
  620. }
  621. }
  622. }
  623. b->ArgNames({"comp_style", "max_data", "per_key_size", "enable_statistics",
  624. "negative_query", "enable_filter", "mmap", "compression_type",
  625. "compression_checksum", "no_blockcache"});
  626. }
  627. static const uint64_t DBGetNum = 10000l;
  628. BENCHMARK(DBGet)->Threads(1)->Iterations(DBGetNum)->Apply(DBGetArguments);
  629. BENCHMARK(DBGet)->Threads(8)->Iterations(DBGetNum / 8)->Apply(DBGetArguments);
  630. static void SimpleGetWithPerfContext(benchmark::State& state) {
  631. // setup DB
  632. static std::unique_ptr<DB> db;
  633. std::string db_name;
  634. Options options;
  635. options.create_if_missing = true;
  636. options.arena_block_size = 8 << 20;
  637. auto rnd = Random(301 + state.thread_index());
  638. KeyGenerator kg(&rnd, 1024);
  639. if (state.thread_index() == 0) {
  640. auto env = Env::Default();
  641. std::string db_path;
  642. Status s = env->GetTestDirectory(&db_path);
  643. if (!s.ok()) {
  644. state.SkipWithError(s.ToString().c_str());
  645. return;
  646. }
  647. db_name = db_path + "/simple_get_" + std::to_string(getpid());
  648. DestroyDB(db_name, options);
  649. {
  650. DB* db_ptr = nullptr;
  651. s = DB::Open(options, db_name, &db_ptr);
  652. if (!s.ok()) {
  653. state.SkipWithError(s.ToString().c_str());
  654. return;
  655. }
  656. db.reset(db_ptr);
  657. }
  658. // load db
  659. auto wo = WriteOptions();
  660. wo.disableWAL = true;
  661. for (uint64_t i = 0; i < 1024; i++) {
  662. s = db->Put(wo, kg.Next(), rnd.RandomString(1024));
  663. if (!s.ok()) {
  664. state.SkipWithError(s.ToString().c_str());
  665. }
  666. }
  667. auto db_full = static_cast_with_check<DBImpl>(db.get());
  668. s = db_full->WaitForCompact(WaitForCompactOptions());
  669. if (!s.ok()) {
  670. state.SkipWithError(s.ToString().c_str());
  671. return;
  672. }
  673. FlushOptions fo;
  674. s = db->Flush(fo);
  675. if (!s.ok()) {
  676. state.SkipWithError(s.ToString().c_str());
  677. }
  678. }
  679. auto ro = ReadOptions();
  680. size_t not_found = 0;
  681. uint64_t user_key_comparison_count = 0;
  682. uint64_t block_read_time = 0;
  683. uint64_t block_read_cpu_time = 0;
  684. uint64_t block_checksum_time = 0;
  685. uint64_t get_snapshot_time = 0;
  686. uint64_t get_post_process_time = 0;
  687. uint64_t get_from_output_files_time = 0;
  688. uint64_t new_table_block_iter_nanos = 0;
  689. uint64_t block_seek_nanos = 0;
  690. uint64_t get_cpu_nanos = 0;
  691. uint64_t get_from_table_nanos = 0;
  692. SetPerfLevel(kEnableTime);
  693. get_perf_context()->EnablePerLevelPerfContext();
  694. for (auto _ : state) {
  695. std::string val;
  696. get_perf_context()->Reset();
  697. Status s = db->Get(ro, kg.NextNonExist(), &val);
  698. if (s.IsNotFound()) {
  699. not_found++;
  700. }
  701. user_key_comparison_count += get_perf_context()->user_key_comparison_count;
  702. block_read_time += get_perf_context()->block_read_time;
  703. block_read_cpu_time += get_perf_context()->block_read_cpu_time;
  704. block_checksum_time += get_perf_context()->block_checksum_time;
  705. get_snapshot_time += get_perf_context()->get_snapshot_time;
  706. get_post_process_time += get_perf_context()->get_post_process_time;
  707. get_from_output_files_time +=
  708. get_perf_context()->get_from_output_files_time;
  709. new_table_block_iter_nanos +=
  710. get_perf_context()->new_table_block_iter_nanos;
  711. block_seek_nanos += get_perf_context()->block_seek_nanos;
  712. get_cpu_nanos += get_perf_context()->get_cpu_nanos;
  713. get_from_table_nanos +=
  714. (*(get_perf_context()->level_to_perf_context))[0].get_from_table_nanos;
  715. }
  716. state.counters["neg_qu_pct"] = benchmark::Counter(
  717. static_cast<double>(not_found * 100), benchmark::Counter::kAvgIterations);
  718. state.counters["user_key_comparison_count"] =
  719. benchmark::Counter(static_cast<double>(user_key_comparison_count),
  720. benchmark::Counter::kAvgIterations);
  721. state.counters["block_read_time"] = benchmark::Counter(
  722. static_cast<double>(block_read_time), benchmark::Counter::kAvgIterations);
  723. state.counters["block_read_cpu_time"] =
  724. benchmark::Counter(static_cast<double>(block_read_cpu_time),
  725. benchmark::Counter::kAvgIterations);
  726. state.counters["block_checksum_time"] =
  727. benchmark::Counter(static_cast<double>(block_checksum_time),
  728. benchmark::Counter::kAvgIterations);
  729. state.counters["get_snapshot_time"] =
  730. benchmark::Counter(static_cast<double>(get_snapshot_time),
  731. benchmark::Counter::kAvgIterations);
  732. state.counters["get_post_process_time"] =
  733. benchmark::Counter(static_cast<double>(get_post_process_time),
  734. benchmark::Counter::kAvgIterations);
  735. state.counters["get_from_output_files_time"] =
  736. benchmark::Counter(static_cast<double>(get_from_output_files_time),
  737. benchmark::Counter::kAvgIterations);
  738. state.counters["new_table_block_iter_nanos"] =
  739. benchmark::Counter(static_cast<double>(new_table_block_iter_nanos),
  740. benchmark::Counter::kAvgIterations);
  741. state.counters["block_seek_nanos"] =
  742. benchmark::Counter(static_cast<double>(block_seek_nanos),
  743. benchmark::Counter::kAvgIterations);
  744. state.counters["get_cpu_nanos"] = benchmark::Counter(
  745. static_cast<double>(get_cpu_nanos), benchmark::Counter::kAvgIterations);
  746. state.counters["get_from_table_nanos"] =
  747. benchmark::Counter(static_cast<double>(get_from_table_nanos),
  748. benchmark::Counter::kAvgIterations);
  749. if (state.thread_index() == 0) {
  750. TeardownDB(state, db, options, kg);
  751. }
  752. }
// Fixed iteration count so the perf-context counters (reported with
// kAvgIterations) are averaged over a stable number of Get() calls.
BENCHMARK(SimpleGetWithPerfContext)->Iterations(1000000);
// Benchmarks DB::GetMergeOperands() when every operand still resides in the
// memtable (nothing is flushed; write_buffer_size is sized to prevent
// automatic flush). Arg "entries_per_key" controls how many merge operands
// each key accumulates; total data volume is fixed at kDataLen, so more
// operands per key means fewer distinct keys.
static void DBGetMergeOperandsInMemtable(benchmark::State& state) {
  const uint64_t kDataLen = 16 << 20;  // 16MB
  const uint64_t kValueLen = 64;
  const uint64_t kNumEntries = kDataLen / kValueLen;
  const uint64_t kNumEntriesPerKey = state.range(0);
  const uint64_t kNumKeys = kNumEntries / kNumEntriesPerKey;
  // setup DB (shared across benchmark threads; only thread 0 creates/loads it)
  static std::unique_ptr<DB> db;
  Options options;
  options.merge_operator = MergeOperators::CreateStringAppendOperator();
  // Make memtable large enough that automatic flush will not be triggered.
  options.write_buffer_size = 2 * kDataLen;
  KeyGenerator sequential_key_gen(kNumKeys);
  auto rnd = Random(301 + state.thread_index());
  if (state.thread_index() == 0) {
    SetupDB(state, options, &db, "DBGetMergeOperandsInMemtable");
    // load db: kNumEntries merges over kNumKeys sequential keys, so each key
    // presumably ends up with kNumEntriesPerKey operands (depends on
    // KeyGenerator wrapping -- confirm against KeyGenerator::Next()).
    auto write_opts = WriteOptions();
    write_opts.disableWAL = true;
    for (uint64_t i = 0; i < kNumEntries; i++) {
      Status s = db->Merge(write_opts, sequential_key_gen.Next(),
                           rnd.RandomString(static_cast<int>(kValueLen)));
      if (!s.ok()) {
        state.SkipWithError(s.ToString().c_str());
      }
    }
  }
  KeyGenerator random_key_gen(kNumKeys);
  std::vector<PinnableSlice> value_operands;
  value_operands.resize(kNumEntriesPerKey);
  GetMergeOperandsOptions get_merge_ops_opts;
  get_merge_ops_opts.expected_max_number_of_operands =
      static_cast<int>(kNumEntriesPerKey);
  for (auto _ : state) {
    int num_value_operands = 0;
    Status s = db->GetMergeOperands(
        ReadOptions(), db->DefaultColumnFamily(), random_key_gen.Next(),
        value_operands.data(), &get_merge_ops_opts, &num_value_operands);
    if (!s.ok()) {
      state.SkipWithError(s.ToString().c_str());
    }
    if (num_value_operands != static_cast<int>(kNumEntriesPerKey)) {
      state.SkipWithError("Unexpected number of merge operands found for key");
    }
    // Release pinned buffers before the next lookup reuses the slots.
    for (auto& value_operand : value_operands) {
      value_operand.Reset();
    }
  }
  if (state.thread_index() == 0) {
    TeardownDB(state, db, options, random_key_gen);
  }
}
  806. static void DBGetMergeOperandsInSstFile(benchmark::State& state) {
  807. const uint64_t kDataLen = 16 << 20; // 16MB
  808. const uint64_t kValueLen = 64;
  809. const uint64_t kNumEntries = kDataLen / kValueLen;
  810. const uint64_t kNumEntriesPerKey = state.range(0);
  811. const uint64_t kNumKeys = kNumEntries / kNumEntriesPerKey;
  812. const bool kMmap = state.range(1);
  813. // setup DB
  814. static std::unique_ptr<DB> db;
  815. BlockBasedTableOptions table_options;
  816. if (kMmap) {
  817. table_options.no_block_cache = true;
  818. } else {
  819. // Make block cache large enough that eviction will not be triggered.
  820. table_options.block_cache = NewLRUCache(2 * kDataLen);
  821. }
  822. Options options;
  823. if (kMmap) {
  824. options.allow_mmap_reads = true;
  825. }
  826. options.compression = kNoCompression;
  827. options.merge_operator = MergeOperators::CreateStringAppendOperator();
  828. options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  829. // Make memtable large enough that automatic flush will not be triggered.
  830. options.write_buffer_size = 2 * kDataLen;
  831. KeyGenerator sequential_key_gen(kNumKeys);
  832. auto rnd = Random(301 + state.thread_index());
  833. if (state.thread_index() == 0) {
  834. SetupDB(state, options, &db, "DBGetMergeOperandsInBlockCache");
  835. // load db
  836. //
  837. // Take a snapshot after each cycle of merges to ensure flush cannot
  838. // merge any entries.
  839. std::vector<const Snapshot*> snapshots;
  840. snapshots.resize(kNumEntriesPerKey);
  841. auto write_opts = WriteOptions();
  842. write_opts.disableWAL = true;
  843. for (uint64_t i = 0; i < kNumEntriesPerKey; i++) {
  844. for (uint64_t j = 0; j < kNumKeys; j++) {
  845. Status s = db->Merge(write_opts, sequential_key_gen.Next(),
  846. rnd.RandomString(static_cast<int>(kValueLen)));
  847. if (!s.ok()) {
  848. state.SkipWithError(s.ToString().c_str());
  849. }
  850. }
  851. snapshots[i] = db->GetSnapshot();
  852. }
  853. // Flush to an L0 file; read back to prime the cache/mapped memory.
  854. db->Flush(FlushOptions());
  855. for (uint64_t i = 0; i < kNumKeys; ++i) {
  856. std::string value;
  857. Status s = db->Get(ReadOptions(), sequential_key_gen.Next(), &value);
  858. if (!s.ok()) {
  859. state.SkipWithError(s.ToString().c_str());
  860. }
  861. }
  862. if (state.thread_index() == 0) {
  863. for (uint64_t i = 0; i < kNumEntriesPerKey; ++i) {
  864. db->ReleaseSnapshot(snapshots[i]);
  865. }
  866. }
  867. }
  868. KeyGenerator random_key_gen(kNumKeys);
  869. std::vector<PinnableSlice> value_operands;
  870. value_operands.resize(kNumEntriesPerKey);
  871. GetMergeOperandsOptions get_merge_ops_opts;
  872. get_merge_ops_opts.expected_max_number_of_operands =
  873. static_cast<int>(kNumEntriesPerKey);
  874. for (auto _ : state) {
  875. int num_value_operands = 0;
  876. ReadOptions read_opts;
  877. read_opts.verify_checksums = false;
  878. Status s = db->GetMergeOperands(
  879. read_opts, db->DefaultColumnFamily(), random_key_gen.Next(),
  880. value_operands.data(), &get_merge_ops_opts, &num_value_operands);
  881. if (!s.ok()) {
  882. state.SkipWithError(s.ToString().c_str());
  883. }
  884. if (num_value_operands != static_cast<int>(kNumEntriesPerKey)) {
  885. state.SkipWithError("Unexpected number of merge operands found for key");
  886. }
  887. for (auto& value_operand : value_operands) {
  888. value_operand.Reset();
  889. }
  890. }
  891. if (state.thread_index() == 0) {
  892. TeardownDB(state, db, options, random_key_gen);
  893. }
  894. }
  895. static void DBGetMergeOperandsInMemtableArguments(
  896. benchmark::internal::Benchmark* b) {
  897. for (int entries_per_key : {1, 32, 1024}) {
  898. b->Args({entries_per_key});
  899. }
  900. b->ArgNames({"entries_per_key"});
  901. }
  902. static void DBGetMergeOperandsInSstFileArguments(
  903. benchmark::internal::Benchmark* b) {
  904. for (int entries_per_key : {1, 32, 1024}) {
  905. for (bool mmap : {false, true}) {
  906. b->Args({entries_per_key, mmap});
  907. }
  908. }
  909. b->ArgNames({"entries_per_key", "mmap"});
  910. }
// Register the merge-operand benchmarks single-threaded and with 8 threads
// sharing the same (static) DB instance.
BENCHMARK(DBGetMergeOperandsInMemtable)
    ->Threads(1)
    ->Apply(DBGetMergeOperandsInMemtableArguments);
BENCHMARK(DBGetMergeOperandsInMemtable)
    ->Threads(8)
    ->Apply(DBGetMergeOperandsInMemtableArguments);
BENCHMARK(DBGetMergeOperandsInSstFile)
    ->Threads(1)
    ->Apply(DBGetMergeOperandsInSstFileArguments);
BENCHMARK(DBGetMergeOperandsInSstFile)
    ->Threads(8)
    ->Apply(DBGetMergeOperandsInSstFileArguments);
  923. std::string GenerateKey(int primary_key, int secondary_key, int padding_size,
  924. Random* rnd) {
  925. char buf[50];
  926. char* p = &buf[0];
  927. snprintf(buf, sizeof(buf), "%6d%4d", primary_key, secondary_key);
  928. std::string k(p);
  929. if (padding_size) {
  930. k += rnd->RandomString(padding_size);
  931. }
  932. return k;
  933. }
  934. void GenerateRandomKVs(std::vector<std::string>* keys,
  935. std::vector<std::string>* values, const int from,
  936. const int len, const int step = 1,
  937. const int padding_size = 0,
  938. const int keys_share_prefix = 1) {
  939. Random rnd(302);
  940. // generate different prefix
  941. for (int i = from; i < from + len; i += step) {
  942. // generating keys that share the prefix
  943. for (int j = 0; j < keys_share_prefix; ++j) {
  944. keys->emplace_back(GenerateKey(i, j, padding_size, &rnd));
  945. // 100 bytes values
  946. values->emplace_back(rnd.RandomString(100));
  947. }
  948. }
  949. }
// TODO: move it to different files, as it's testing an internal API
// Benchmarks DataBlockIter::SeekForGet() on a single data block built with
// the binary-search data-block index. 500 random KVs are stored (and later
// looked up) with a "1" suffix appended to each user key; each iteration
// seeks a random stored key and accumulates perf-context block_seek_nanos.
static void DataBlockSeek(benchmark::State& state) {
  Random rnd(301);
  Options options = Options();
  BlockBuilder builder(16, true, false,
                       BlockBasedTableOptions::kDataBlockBinarySearch);
  int num_records = 500;
  std::vector<std::string> keys;
  std::vector<std::string> values;
  GenerateRandomKVs(&keys, &values, 0, num_records);
  for (int i = 0; i < num_records; i++) {
    // Store under an internal key (seq 0, kTypeValue) built from the
    // suffixed user key.
    std::string ukey(keys[i] + "1");
    InternalKey ikey(ukey, 0, kTypeValue);
    builder.Add(ikey.Encode().ToString(), values[i]);
  }
  Slice rawblock = builder.Finish();
  BlockContents contents;
  contents.data = rawblock;
  Block reader(std::move(contents));
  SetPerfLevel(kEnableTime);
  uint64_t total = 0;
  for (auto _ : state) {
    // A fresh iterator per iteration; NewDataIterator returns a heap object
    // that must be deleted manually.
    DataBlockIter* iter = reader.NewDataIterator(options.comparator,
                                                 kDisableGlobalSequenceNumber);
    uint32_t index = rnd.Uniform(static_cast<int>(num_records));
    std::string ukey(keys[index] + "1");
    InternalKey ikey(ukey, 0, kTypeValue);
    get_perf_context()->Reset();
    bool may_exist = iter->SeekForGet(ikey.Encode().ToString());
    if (!may_exist) {
      state.SkipWithError("key not found");
    }
    total += get_perf_context()->block_seek_nanos;
    delete iter;
  }
  state.counters["seek_ns"] = benchmark::Counter(
      static_cast<double>(total), benchmark::Counter::kAvgIterations);
}
// Fixed iteration count so the averaged seek_ns counter is stable.
BENCHMARK(DataBlockSeek)->Iterations(1000000);
// Measures Iterator::Seek() latency only: iterator construction and key
// generation happen while timing is paused. Args select compaction style,
// total data size, value size, statistics collection, negative
// (non-existing key) queries, and bloom-filter usage.
static void IteratorSeek(benchmark::State& state) {
  auto compaction_style = static_cast<CompactionStyle>(state.range(0));
  uint64_t max_data = state.range(1);
  uint64_t per_key_size = state.range(2);
  bool enable_statistics = state.range(3);
  bool negative_query = state.range(4);
  bool enable_filter = state.range(5);
  uint64_t key_num = max_data / per_key_size;
  // setup DB (shared across benchmark threads; only thread 0 creates/loads it)
  static std::unique_ptr<DB> db;
  Options options;
  if (enable_statistics) {
    options.statistics = CreateDBStatistics();
  }
  options.compaction_style = compaction_style;
  if (enable_filter) {
    BlockBasedTableOptions table_options;
    table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  }
  auto rnd = Random(301 + state.thread_index());
  KeyGenerator kg(&rnd, key_num);
  if (state.thread_index() == 0) {
    SetupDB(state, options, &db, "IteratorSeek");
    // load db
    auto wo = WriteOptions();
    wo.disableWAL = true;
    for (uint64_t i = 0; i < key_num; i++) {
      Status s = db->Put(wo, kg.Next(),
                         rnd.RandomString(static_cast<int>(per_key_size)));
      if (!s.ok()) {
        state.SkipWithError(s.ToString().c_str());
      }
    }
    FlushOptions fo;
    Status s = db->Flush(fo);
    if (!s.ok()) {
      state.SkipWithError(s.ToString().c_str());
    }
    auto db_full = static_cast_with_check<DBImpl>(db.get());
    s = db_full->WaitForCompact(WaitForCompactOptions());
    if (!s.ok()) {
      state.SkipWithError(s.ToString().c_str());
      return;
    }
  }
  for (auto _ : state) {
    // A fresh iterator is created for every measured Seek(); its creation
    // cost is excluded via PauseTiming().
    std::unique_ptr<Iterator> iter{nullptr};
    state.PauseTiming();
    if (!iter) {
      iter.reset(db->NewIterator(ReadOptions()));
    }
    Slice key = negative_query ? kg.NextNonExist() : kg.Next();
    if (!iter->status().ok()) {
      state.SkipWithError(iter->status().ToString().c_str());
      return;
    }
    state.ResumeTiming();
    iter->Seek(key);
  }
  if (state.thread_index() == 0) {
    TeardownDB(state, db, options, kg);
  }
}
  1053. static void IteratorSeekArguments(benchmark::internal::Benchmark* b) {
  1054. for (int comp_style : {kCompactionStyleLevel, kCompactionStyleUniversal,
  1055. kCompactionStyleFIFO}) {
  1056. for (int64_t max_data : {128l << 20, 512l << 20}) {
  1057. for (int64_t per_key_size : {256, 1024}) {
  1058. for (bool enable_statistics : {false, true}) {
  1059. for (bool negative_query : {false, true}) {
  1060. for (bool enable_filter : {false, true}) {
  1061. b->Args({comp_style, max_data, per_key_size, enable_statistics,
  1062. negative_query, enable_filter});
  1063. }
  1064. }
  1065. }
  1066. }
  1067. }
  1068. }
  1069. b->ArgNames({"comp_style", "max_data", "per_key_size", "enable_statistics",
  1070. "negative_query", "enable_filter"});
  1071. }
// Total Seek() iterations; the 8-thread run divides them so the overall
// amount of work stays comparable to the single-threaded run.
static constexpr uint64_t kDBSeekNum = 10l << 10;
BENCHMARK(IteratorSeek)
    ->Threads(1)
    ->Iterations(kDBSeekNum)
    ->Apply(IteratorSeekArguments);
BENCHMARK(IteratorSeek)
    ->Threads(8)
    ->Iterations(kDBSeekNum / 8)
    ->Apply(IteratorSeekArguments);
// Measures Iterator::Next() latency only: iterator creation and the
// positioning Seek() loop happen while timing is paused.
static void IteratorNext(benchmark::State& state) {
  auto compaction_style = static_cast<CompactionStyle>(state.range(0));
  uint64_t max_data = state.range(1);
  uint64_t per_key_size = state.range(2);
  uint64_t key_num = max_data / per_key_size;
  // setup DB (shared across benchmark threads; only thread 0 creates/loads it)
  static std::unique_ptr<DB> db;
  Options options;
  options.compaction_style = compaction_style;
  auto rnd = Random(301 + state.thread_index());
  KeyGenerator kg(&rnd, key_num);
  if (state.thread_index() == 0) {
    SetupDB(state, options, &db, "IteratorNext");
    // load db
    auto wo = WriteOptions();
    wo.disableWAL = true;
    for (uint64_t i = 0; i < key_num; i++) {
      Status s = db->Put(wo, kg.Next(),
                         rnd.RandomString(static_cast<int>(per_key_size)));
      if (!s.ok()) {
        state.SkipWithError(s.ToString().c_str());
      }
    }
    FlushOptions fo;
    Status s = db->Flush(fo);
    if (!s.ok()) {
      state.SkipWithError(s.ToString().c_str());
    }
    auto db_full = static_cast_with_check<DBImpl>(db.get());
    s = db_full->WaitForCompact(WaitForCompactOptions());
    if (!s.ok()) {
      state.SkipWithError(s.ToString().c_str());
      return;
    }
  }
  for (auto _ : state) {
    std::unique_ptr<Iterator> iter{nullptr};
    state.PauseTiming();
    if (!iter) {
      iter.reset(db->NewIterator(ReadOptions()));
    }
    // Re-seek until the iterator lands on a valid entry so the timed Next()
    // always starts from a positioned iterator.
    while (!iter->Valid()) {
      iter->Seek(kg.Next());
      if (!iter->status().ok()) {
        state.SkipWithError(iter->status().ToString().c_str());
      }
    }
    state.ResumeTiming();
    iter->Next();
  }
  if (state.thread_index() == 0) {
    TeardownDB(state, db, options, kg);
  }
}
  1135. static void IteratorNextArguments(benchmark::internal::Benchmark* b) {
  1136. for (int comp_style : {kCompactionStyleLevel, kCompactionStyleUniversal,
  1137. kCompactionStyleFIFO}) {
  1138. for (int64_t max_data : {128l << 20, 512l << 20}) {
  1139. for (int64_t per_key_size : {256, 1024}) {
  1140. b->Args({comp_style, max_data, per_key_size});
  1141. }
  1142. }
  1143. }
  1144. b->ArgNames({"comp_style", "max_data", "per_key_size"});
  1145. }
// Fixed iteration count for the Next() benchmark registration.
static constexpr uint64_t kIteratorNextNum = 10l << 10;
BENCHMARK(IteratorNext)
    ->Iterations(kIteratorNextNum)
    ->Apply(IteratorNextArguments);
// Like IteratorNext, but on a small fixed DB (1024 x 1KB values) and with
// perf-context accounting: accumulates comparison counts, skipped internal
// keys, and Next() timing, reported as per-iteration averages.
static void IteratorNextWithPerfContext(benchmark::State& state) {
  // setup DB (shared across benchmark threads; only thread 0 creates/loads it)
  static std::unique_ptr<DB> db;
  Options options;
  auto rnd = Random(301 + state.thread_index());
  KeyGenerator kg(&rnd, 1024);
  if (state.thread_index() == 0) {
    SetupDB(state, options, &db, "IteratorNextWithPerfContext");
    // load db
    auto wo = WriteOptions();
    wo.disableWAL = true;
    for (uint64_t i = 0; i < 1024; i++) {
      Status s = db->Put(wo, kg.Next(), rnd.RandomString(1024));
      if (!s.ok()) {
        state.SkipWithError(s.ToString().c_str());
      }
    }
    auto db_full = static_cast_with_check<DBImpl>(db.get());
    Status s = db_full->WaitForCompact(WaitForCompactOptions());
    if (!s.ok()) {
      state.SkipWithError(s.ToString().c_str());
      return;
    }
    FlushOptions fo;
    s = db->Flush(fo);
    if (!s.ok()) {
      state.SkipWithError(s.ToString().c_str());
    }
  }
  uint64_t user_key_comparison_count = 0;
  uint64_t internal_key_skipped_count = 0;
  uint64_t find_next_user_entry_time = 0;
  uint64_t iter_next_cpu_nanos = 0;
  SetPerfLevel(kEnableTime);
  get_perf_context()->EnablePerLevelPerfContext();
  for (auto _ : state) {
    std::unique_ptr<Iterator> iter{nullptr};
    state.PauseTiming();
    if (!iter) {
      iter.reset(db->NewIterator(ReadOptions()));
    }
    // Position the iterator on a valid entry before timing Next().
    while (!iter->Valid()) {
      iter->Seek(kg.Next());
      if (!iter->status().ok()) {
        state.SkipWithError(iter->status().ToString().c_str());
      }
    }
    // Reset the perf context so the deltas below cover only this Next().
    get_perf_context()->Reset();
    state.ResumeTiming();
    iter->Next();
    user_key_comparison_count += get_perf_context()->user_key_comparison_count;
    internal_key_skipped_count +=
        get_perf_context()->internal_key_skipped_count;
    find_next_user_entry_time += get_perf_context()->find_next_user_entry_time;
    iter_next_cpu_nanos += get_perf_context()->iter_next_cpu_nanos;
  }
  state.counters["user_key_comparison_count"] =
      benchmark::Counter(static_cast<double>(user_key_comparison_count),
                         benchmark::Counter::kAvgIterations);
  state.counters["internal_key_skipped_count"] =
      benchmark::Counter(static_cast<double>(internal_key_skipped_count),
                         benchmark::Counter::kAvgIterations);
  state.counters["find_next_user_entry_time"] =
      benchmark::Counter(static_cast<double>(find_next_user_entry_time),
                         benchmark::Counter::kAvgIterations);
  state.counters["iter_next_cpu_nanos"] =
      benchmark::Counter(static_cast<double>(iter_next_cpu_nanos),
                         benchmark::Counter::kAvgIterations);
  if (state.thread_index() == 0) {
    TeardownDB(state, db, options, kg);
  }
}
// Fixed iteration count so the averaged perf-context counters are stable.
BENCHMARK(IteratorNextWithPerfContext)->Iterations(100000);
  1223. static void IteratorPrev(benchmark::State& state) {
  1224. auto compaction_style = static_cast<CompactionStyle>(state.range(0));
  1225. uint64_t max_data = state.range(1);
  1226. uint64_t per_key_size = state.range(2);
  1227. uint64_t key_num = max_data / per_key_size;
  1228. // setup DB
  1229. static std::unique_ptr<DB> db;
  1230. std::string db_name;
  1231. Options options;
  1232. options.compaction_style = compaction_style;
  1233. auto rnd = Random(301 + state.thread_index());
  1234. KeyGenerator kg(&rnd, key_num);
  1235. if (state.thread_index() == 0) {
  1236. SetupDB(state, options, &db, "IteratorPrev");
  1237. // load db
  1238. auto wo = WriteOptions();
  1239. wo.disableWAL = true;
  1240. for (uint64_t i = 0; i < key_num; i++) {
  1241. Status s = db->Put(wo, kg.Next(),
  1242. rnd.RandomString(static_cast<int>(per_key_size)));
  1243. if (!s.ok()) {
  1244. state.SkipWithError(s.ToString().c_str());
  1245. }
  1246. }
  1247. FlushOptions fo;
  1248. Status s = db->Flush(fo);
  1249. if (!s.ok()) {
  1250. state.SkipWithError(s.ToString().c_str());
  1251. }
  1252. auto db_full = static_cast_with_check<DBImpl>(db.get());
  1253. s = db_full->WaitForCompact(WaitForCompactOptions());
  1254. if (!s.ok()) {
  1255. state.SkipWithError(s.ToString().c_str());
  1256. return;
  1257. }
  1258. }
  1259. for (auto _ : state) {
  1260. std::unique_ptr<Iterator> iter{nullptr};
  1261. state.PauseTiming();
  1262. if (!iter) {
  1263. iter.reset(db->NewIterator(ReadOptions()));
  1264. }
  1265. while (!iter->Valid()) {
  1266. iter->Seek(kg.Next());
  1267. if (!iter->status().ok()) {
  1268. state.SkipWithError(iter->status().ToString().c_str());
  1269. }
  1270. }
  1271. state.ResumeTiming();
  1272. iter->Prev();
  1273. }
  1274. if (state.thread_index() == 0) {
  1275. TeardownDB(state, db, options, kg);
  1276. }
  1277. }
  1278. static void IteratorPrevArguments(benchmark::internal::Benchmark* b) {
  1279. for (int comp_style : {kCompactionStyleLevel, kCompactionStyleUniversal,
  1280. kCompactionStyleFIFO}) {
  1281. for (int64_t max_data : {128l << 20, 512l << 20}) {
  1282. for (int64_t per_key_size : {256, 1024}) {
  1283. b->Args({comp_style, max_data, per_key_size});
  1284. }
  1285. }
  1286. }
  1287. b->ArgNames({"comp_style", "max_data", "per_key_size"});
  1288. }
// Fixed iteration count for the Prev() benchmark registration.
static constexpr uint64_t kIteratorPrevNum = 10l << 10;
BENCHMARK(IteratorPrev)
    ->Iterations(kIteratorPrevNum)
    ->Apply(IteratorPrevArguments);
// Measures Iterator::Seek() to a prefix target with a fixed 4-byte prefix
// extractor configured. Iterator creation happens while timing is paused;
// the seek key comes from KeyGenerator::NextPrefix().
static void PrefixSeek(benchmark::State& state) {
  auto compaction_style = static_cast<CompactionStyle>(state.range(0));
  uint64_t max_data = state.range(1);
  uint64_t per_key_size = state.range(2);
  bool enable_statistics = state.range(3);
  bool enable_filter = state.range(4);
  uint64_t key_num = max_data / per_key_size;
  // setup DB (shared across benchmark threads; only thread 0 creates/loads it)
  static std::unique_ptr<DB> db;
  Options options;
  if (enable_statistics) {
    options.statistics = CreateDBStatistics();
  }
  options.compaction_style = compaction_style;
  options.prefix_extractor.reset(NewFixedPrefixTransform(4));
  if (enable_filter) {
    BlockBasedTableOptions table_options;
    table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  }
  auto rnd = Random(301 + state.thread_index());
  // Third argument (key_num / 100) presumably controls prefix cardinality --
  // confirm against the KeyGenerator definition.
  KeyGenerator kg(&rnd, key_num, key_num / 100);
  if (state.thread_index() == 0) {
    SetupDB(state, options, &db, "PrefixSeek");
    // load db
    auto wo = WriteOptions();
    wo.disableWAL = true;
    for (uint64_t i = 0; i < key_num; i++) {
      Status s = db->Put(wo, kg.Next(),
                         rnd.RandomString(static_cast<int>(per_key_size)));
      if (!s.ok()) {
        state.SkipWithError(s.ToString().c_str());
      }
    }
    FlushOptions fo;
    Status s = db->Flush(fo);
    if (!s.ok()) {
      state.SkipWithError(s.ToString().c_str());
    }
    auto db_full = static_cast_with_check<DBImpl>(db.get());
    s = db_full->WaitForCompact(WaitForCompactOptions());
    if (!s.ok()) {
      state.SkipWithError(s.ToString().c_str());
      return;
    }
  }
  for (auto _ : state) {
    // A fresh iterator per measured Seek(); creation cost is untimed.
    std::unique_ptr<Iterator> iter{nullptr};
    state.PauseTiming();
    if (!iter) {
      iter.reset(db->NewIterator(ReadOptions()));
    }
    state.ResumeTiming();
    iter->Seek(kg.NextPrefix());
    if (!iter->status().ok()) {
      state.SkipWithError(iter->status().ToString().c_str());
      return;
    }
  }
  if (state.thread_index() == 0) {
    TeardownDB(state, db, options, kg);
  }
}
  1356. static void PrefixSeekArguments(benchmark::internal::Benchmark* b) {
  1357. for (int comp_style : {kCompactionStyleLevel, kCompactionStyleUniversal,
  1358. kCompactionStyleFIFO}) {
  1359. for (int64_t max_data : {128l << 20, 512l << 20}) {
  1360. for (int64_t per_key_size : {256, 1024}) {
  1361. for (bool enable_statistics : {false, true}) {
  1362. for (bool enable_filter : {false, true}) {
  1363. b->Args({comp_style, max_data, per_key_size, enable_statistics,
  1364. enable_filter});
  1365. }
  1366. }
  1367. }
  1368. }
  1369. }
  1370. b->ArgNames({"comp_style", "max_data", "per_key_size", "enable_statistics",
  1371. "enable_filter"});
  1372. }
// Prefix-seek registrations: single-threaded and 8-thread, with iterations
// split so total work stays comparable.
static constexpr uint64_t kPrefixSeekNum = 10l << 10;
BENCHMARK(PrefixSeek)->Iterations(kPrefixSeekNum)->Apply(PrefixSeekArguments);
BENCHMARK(PrefixSeek)
    ->Threads(8)
    ->Iterations(kPrefixSeekNum / 8)
    ->Apply(PrefixSeekArguments);
  1379. // TODO: move it to different files, as it's testing an internal API
  1380. static void RandomAccessFileReaderRead(benchmark::State& state) {
  1381. bool enable_statistics = state.range(0);
  1382. constexpr int kFileNum = 10;
  1383. auto env = Env::Default();
  1384. auto fs = env->GetFileSystem();
  1385. std::string db_path;
  1386. Status s = env->GetTestDirectory(&db_path);
  1387. if (!s.ok()) {
  1388. state.SkipWithError(s.ToString().c_str());
  1389. return;
  1390. }
  1391. // Setup multiple `RandomAccessFileReader`s with different parameters to be
  1392. // used for test
  1393. Random rand(301);
  1394. std::string fname_base =
  1395. db_path + kFilePathSeparator + "random-access-file-reader-read";
  1396. std::vector<std::unique_ptr<RandomAccessFileReader>> readers;
  1397. auto statistics_share = CreateDBStatistics();
  1398. Statistics* statistics = enable_statistics ? statistics_share.get() : nullptr;
  1399. for (int i = 0; i < kFileNum; i++) {
  1400. std::string fname = fname_base + std::to_string(i);
  1401. std::string content = rand.RandomString(kDefaultPageSize);
  1402. std::unique_ptr<WritableFile> tgt_file;
  1403. env->NewWritableFile(fname, &tgt_file, EnvOptions());
  1404. tgt_file->Append(content);
  1405. tgt_file->Close();
  1406. std::unique_ptr<FSRandomAccessFile> f;
  1407. fs->NewRandomAccessFile(fname, FileOptions(), &f, nullptr);
  1408. int rand_num = rand.Next() % 3;
  1409. auto temperature = rand_num == 0 ? Temperature::kUnknown
  1410. : rand_num == 1 ? Temperature::kWarm
  1411. : Temperature::kCold;
  1412. readers.emplace_back(new RandomAccessFileReader(
  1413. std::move(f), fname, env->GetSystemClock().get(), nullptr, statistics,
  1414. Histograms::HISTOGRAM_ENUM_MAX, nullptr, nullptr, {}, temperature,
  1415. rand_num == 1));
  1416. }
  1417. IOOptions io_options;
  1418. std::unique_ptr<char[]> scratch(new char[2048]);
  1419. Slice result;
  1420. uint64_t idx = 0;
  1421. for (auto _ : state) {
  1422. s = readers[idx++ % kFileNum]->Read(io_options, 0, kDefaultPageSize / 3,
  1423. &result, scratch.get(), nullptr);
  1424. if (!s.ok()) {
  1425. state.SkipWithError(s.ToString().c_str());
  1426. }
  1427. }
  1428. // clean up
  1429. for (int i = 0; i < kFileNum; i++) {
  1430. std::string fname = fname_base + std::to_string(i);
  1431. env->DeleteFile(fname); // ignore return, okay to fail cleanup
  1432. }
  1433. }
// Run the file-reader benchmark with statistics disabled (0) and enabled (1).
BENCHMARK(RandomAccessFileReaderRead)
    ->Iterations(1000000)
    ->Arg(0)
    ->Arg(1)
    ->ArgName("enable_statistics");
}  // namespace ROCKSDB_NAMESPACE
BENCHMARK_MAIN();