// memtablerep_bench.cc — micro-benchmark for MemTableRep implementations.
  1. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
  2. // This source code is licensed under both the GPLv2 (found in the
  3. // COPYING file in the root directory) and Apache 2.0 License
  4. // (found in the LICENSE.Apache file in the root directory).
  5. //
  6. // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
  7. // Use of this source code is governed by a BSD-style license that can be
  8. // found in the LICENSE file. See the AUTHORS file for names of contributors.
  9. #ifndef GFLAGS
  10. #include <cstdio>
  11. int main() {
  12. fprintf(stderr, "Please install gflags to run rocksdb tools\n");
  13. return 1;
  14. }
  15. #else
  16. #include <atomic>
  17. #include <iostream>
  18. #include <memory>
  19. #include <thread>
  20. #include <type_traits>
  21. #include <vector>
  22. #include "db/dbformat.h"
  23. #include "db/memtable.h"
  24. #include "memory/arena.h"
  25. #include "port/port.h"
  26. #include "port/stack_trace.h"
  27. #include "rocksdb/comparator.h"
  28. #include "rocksdb/convenience.h"
  29. #include "rocksdb/memtablerep.h"
  30. #include "rocksdb/options.h"
  31. #include "rocksdb/slice_transform.h"
  32. #include "rocksdb/system_clock.h"
  33. #include "rocksdb/write_buffer_manager.h"
  34. #include "test_util/testutil.h"
  35. #include "util/gflags_compat.h"
  36. #include "util/mutexlock.h"
  37. #include "util/stop_watch.h"
  38. using GFLAGS_NAMESPACE::ParseCommandLineFlags;
  39. using GFLAGS_NAMESPACE::RegisterFlagValidator;
  40. using GFLAGS_NAMESPACE::SetUsageMessage;
// ----------------------------------------------------------------------------
// Command-line flags (gflags). These select the benchmark mix, the
// MemTableRep implementation under test, and its tuning parameters.
// ----------------------------------------------------------------------------
DEFINE_string(benchmarks, "fillrandom",
              "Comma-separated list of benchmarks to run. Options:\n"
              "\tfillrandom -- write N random values\n"
              "\tfillseq -- write N values in sequential order\n"
              "\treadrandom -- read N values in random order\n"
              "\treadseq -- scan the DB\n"
              "\treadwrite -- 1 thread writes while N - 1 threads "
              "do random\n"
              "\t reads\n"
              "\tseqreadwrite -- 1 thread writes while N - 1 threads "
              "do scans\n");

DEFINE_string(memtablerep, "skiplist",
              "Which implementation of memtablerep to use. See "
              "include/memtablerep.h for\n"
              " more details. Options:\n"
              "\tskiplist -- backed by a skiplist\n"
              "\tvector -- backed by an std::vector\n"
              "\thashskiplist -- backed by a hash skip list\n"
              "\thashlinklist -- backed by a hash linked list\n"
              "\tcuckoo -- backed by a cuckoo hash table");

// Parameters forwarded to the hash-based rep factories.
DEFINE_int64(bucket_count, 1000000,
             "bucket_count parameter to pass into NewHashSkiplistRepFactory or "
             "NewHashLinkListRepFactory");
DEFINE_int32(
    hashskiplist_height, 4,
    "skiplist_height parameter to pass into NewHashSkiplistRepFactory");
DEFINE_int32(
    hashskiplist_branching_factor, 4,
    "branching_factor parameter to pass into NewHashSkiplistRepFactory");
DEFINE_int32(
    huge_page_tlb_size, 0,
    "huge_page_tlb_size parameter to pass into NewHashLinkListRepFactory");
DEFINE_int32(bucket_entries_logging_threshold, 4096,
             "bucket_entries_logging_threshold parameter to pass into "
             "NewHashLinkListRepFactory");
DEFINE_bool(if_log_bucket_dist_when_flash, true,
            "if_log_bucket_dist_when_flash parameter to pass into "
            "NewHashLinkListRepFactory");
DEFINE_int32(
    threshold_use_skiplist, 256,
    "threshold_use_skiplist parameter to pass into NewHashLinkListRepFactory");
DEFINE_int64(write_buffer_size, 256,
             "write_buffer_size parameter to pass into WriteBufferManager");

// Workload shape: thread count, operation counts, and entry sizing.
DEFINE_int32(
    num_threads, 1,
    "Number of concurrent threads to run. If the benchmark includes writes,\n"
    "then at most one thread will be a writer");
DEFINE_int32(num_operations, 1000000,
             "Number of operations to do for write and random read benchmarks");
DEFINE_int32(num_scans, 10,
             "Number of times for each thread to scan the memtablerep for "
             "sequential read "
             "benchmarks");
DEFINE_int32(item_size, 100, "Number of bytes each item should be");
DEFINE_int32(prefix_length, 8,
             "Prefix length to pass into NewFixedPrefixTransform");

/* VectorRep settings */
DEFINE_int64(vectorrep_count, 0,
             "Number of entries to reserve on VectorRep initialization");

DEFINE_int64(seed, 0,
             "Seed base for random number generators. "
             "When 0 it is deterministic.");
  103. namespace ROCKSDB_NAMESPACE {
namespace {
// State threaded through the C-style callback passed to MemTableRep::Get()
// (see ReadBenchmarkThread::callback). All pointers are non-owning.
struct CallbackVerifyArgs {
  bool found;                         // set to true once the key is matched
  LookupKey* key;                     // key being looked up
  MemTableRep* table;                 // table under test
  InternalKeyComparator* comparator;  // supplies the user-key comparator
};
}  // namespace
  112. // Helper for quickly generating random data.
  113. class RandomGenerator {
  114. private:
  115. std::string data_;
  116. unsigned int pos_;
  117. public:
  118. RandomGenerator() {
  119. Random rnd(301);
  120. auto size = (unsigned)std::max(1048576, FLAGS_item_size);
  121. data_ = rnd.RandomString(size);
  122. pos_ = 0;
  123. }
  124. Slice Generate(unsigned int len) {
  125. assert(len <= data_.size());
  126. if (pos_ + len > data_.size()) {
  127. pos_ = 0;
  128. }
  129. pos_ += len;
  130. return Slice(data_.data() + pos_ - len, len);
  131. }
  132. };
// Key ordering used by KeyGenerator: strictly increasing (SEQUENTIAL),
// uniformly random with possible repeats (RANDOM), or a random permutation
// in which every key in [0, num) appears exactly once (UNIQUE_RANDOM).
enum WriteMode { SEQUENTIAL, RANDOM, UNIQUE_RANDOM };
  134. class KeyGenerator {
  135. public:
  136. KeyGenerator(Random64* rand, WriteMode mode, uint64_t num)
  137. : rand_(rand), mode_(mode), num_(num), next_(0) {
  138. if (mode_ == UNIQUE_RANDOM) {
  139. // NOTE: if memory consumption of this approach becomes a concern,
  140. // we can either break it into pieces and only random shuffle a section
  141. // each time. Alternatively, use a bit map implementation
  142. // (https://reviews.facebook.net/differential/diff/54627/)
  143. values_.resize(num_);
  144. for (uint64_t i = 0; i < num_; ++i) {
  145. values_[i] = i;
  146. }
  147. RandomShuffle(values_.begin(), values_.end(),
  148. static_cast<uint32_t>(FLAGS_seed));
  149. }
  150. }
  151. uint64_t Next() {
  152. switch (mode_) {
  153. case SEQUENTIAL:
  154. return next_++;
  155. case RANDOM:
  156. return rand_->Next() % num_;
  157. case UNIQUE_RANDOM:
  158. return values_[next_++];
  159. }
  160. assert(false);
  161. return std::numeric_limits<uint64_t>::max();
  162. }
  163. private:
  164. Random64* rand_;
  165. WriteMode mode_;
  166. const uint64_t num_;
  167. uint64_t next_;
  168. std::vector<uint64_t> values_;
  169. };
// Abstract base for one benchmark worker. Holds non-owning pointers to the
// table under test and to counters shared across workers; derived classes
// implement operator() with the actual workload.
class BenchmarkThread {
 public:
  explicit BenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                           uint64_t* bytes_written, uint64_t* bytes_read,
                           uint64_t* sequence, uint64_t num_ops,
                           uint64_t* read_hits)
      : table_(table),
        key_gen_(key_gen),
        bytes_written_(bytes_written),
        bytes_read_(bytes_read),
        sequence_(sequence),
        num_ops_(num_ops),
        read_hits_(read_hits) {}

  // Runs this thread's workload; implemented per benchmark type.
  virtual void operator()() = 0;
  virtual ~BenchmarkThread() {}

 protected:
  MemTableRep* table_;         // memtable under test (not owned)
  KeyGenerator* key_gen_;      // key source (not owned; null for pure scans)
  uint64_t* bytes_written_;    // shared write-byte counter (not owned)
  uint64_t* bytes_read_;       // shared read-byte counter (not owned)
  uint64_t* sequence_;         // shared sequence number (not owned)
  uint64_t num_ops_;           // operations this thread should perform
  uint64_t* read_hits_;        // shared hit counter (not owned)
  RandomGenerator generator_;  // per-thread value payload source
};
  195. class FillBenchmarkThread : public BenchmarkThread {
  196. public:
  197. FillBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
  198. uint64_t* bytes_written, uint64_t* bytes_read,
  199. uint64_t* sequence, uint64_t num_ops, uint64_t* read_hits)
  200. : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
  201. num_ops, read_hits) {}
  202. void FillOne() {
  203. char* buf = nullptr;
  204. auto internal_key_size = 16;
  205. auto encoded_len =
  206. FLAGS_item_size + VarintLength(internal_key_size) + internal_key_size;
  207. KeyHandle handle = table_->Allocate(encoded_len, &buf);
  208. assert(buf != nullptr);
  209. char* p = EncodeVarint32(buf, internal_key_size);
  210. auto key = key_gen_->Next();
  211. EncodeFixed64(p, key);
  212. p += 8;
  213. EncodeFixed64(p, ++(*sequence_));
  214. p += 8;
  215. Slice bytes = generator_.Generate(FLAGS_item_size);
  216. memcpy(p, bytes.data(), FLAGS_item_size);
  217. p += FLAGS_item_size;
  218. assert(p == buf + encoded_len);
  219. table_->Insert(handle);
  220. *bytes_written_ += encoded_len;
  221. }
  222. void operator()() override {
  223. for (unsigned int i = 0; i < num_ops_; ++i) {
  224. FillOne();
  225. }
  226. }
  227. };
  228. class ConcurrentFillBenchmarkThread : public FillBenchmarkThread {
  229. public:
  230. ConcurrentFillBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
  231. uint64_t* bytes_written, uint64_t* bytes_read,
  232. uint64_t* sequence, uint64_t num_ops,
  233. uint64_t* read_hits,
  234. std::atomic_int* threads_done)
  235. : FillBenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
  236. num_ops, read_hits) {
  237. threads_done_ = threads_done;
  238. }
  239. void operator()() override {
  240. // # of read threads will be total threads - write threads (always 1). Loop
  241. // while all reads complete.
  242. while ((*threads_done_).load() < (FLAGS_num_threads - 1)) {
  243. FillOne();
  244. }
  245. }
  246. private:
  247. std::atomic_int* threads_done_;
  248. };
  249. class ReadBenchmarkThread : public BenchmarkThread {
  250. public:
  251. ReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
  252. uint64_t* bytes_written, uint64_t* bytes_read,
  253. uint64_t* sequence, uint64_t num_ops, uint64_t* read_hits)
  254. : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
  255. num_ops, read_hits) {}
  256. static bool callback(void* arg, const char* entry) {
  257. CallbackVerifyArgs* callback_args = static_cast<CallbackVerifyArgs*>(arg);
  258. assert(callback_args != nullptr);
  259. uint32_t key_length;
  260. const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
  261. if ((callback_args->comparator)
  262. ->user_comparator()
  263. ->Equal(Slice(key_ptr, key_length - 8),
  264. callback_args->key->user_key())) {
  265. callback_args->found = true;
  266. }
  267. return false;
  268. }
  269. void ReadOne() {
  270. std::string user_key;
  271. auto key = key_gen_->Next();
  272. PutFixed64(&user_key, key);
  273. LookupKey lookup_key(user_key, *sequence_);
  274. InternalKeyComparator internal_key_comp(BytewiseComparator());
  275. CallbackVerifyArgs verify_args;
  276. verify_args.found = false;
  277. verify_args.key = &lookup_key;
  278. verify_args.table = table_;
  279. verify_args.comparator = &internal_key_comp;
  280. table_->Get(lookup_key, &verify_args, callback);
  281. if (verify_args.found) {
  282. *bytes_read_ += VarintLength(16) + 16 + FLAGS_item_size;
  283. ++*read_hits_;
  284. }
  285. }
  286. void operator()() override {
  287. for (unsigned int i = 0; i < num_ops_; ++i) {
  288. ReadOne();
  289. }
  290. }
  291. };
  292. class SeqReadBenchmarkThread : public BenchmarkThread {
  293. public:
  294. SeqReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
  295. uint64_t* bytes_written, uint64_t* bytes_read,
  296. uint64_t* sequence, uint64_t num_ops,
  297. uint64_t* read_hits)
  298. : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
  299. num_ops, read_hits) {}
  300. void ReadOneSeq() {
  301. std::unique_ptr<MemTableRep::Iterator> iter(table_->GetIterator());
  302. for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
  303. // pretend to read the value
  304. *bytes_read_ += VarintLength(16) + 16 + FLAGS_item_size;
  305. }
  306. ++*read_hits_;
  307. }
  308. void operator()() override {
  309. for (unsigned int i = 0; i < num_ops_; ++i) {
  310. {
  311. ReadOneSeq();
  312. }
  313. }
  314. }
  315. };
  316. class ConcurrentReadBenchmarkThread : public ReadBenchmarkThread {
  317. public:
  318. ConcurrentReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
  319. uint64_t* bytes_written, uint64_t* bytes_read,
  320. uint64_t* sequence, uint64_t num_ops,
  321. uint64_t* read_hits,
  322. std::atomic_int* threads_done)
  323. : ReadBenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
  324. num_ops, read_hits) {
  325. threads_done_ = threads_done;
  326. }
  327. void operator()() override {
  328. for (unsigned int i = 0; i < num_ops_; ++i) {
  329. ReadOne();
  330. }
  331. ++*threads_done_;
  332. }
  333. private:
  334. std::atomic_int* threads_done_;
  335. };
  336. class SeqConcurrentReadBenchmarkThread : public SeqReadBenchmarkThread {
  337. public:
  338. SeqConcurrentReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
  339. uint64_t* bytes_written,
  340. uint64_t* bytes_read, uint64_t* sequence,
  341. uint64_t num_ops, uint64_t* read_hits,
  342. std::atomic_int* threads_done)
  343. : SeqReadBenchmarkThread(table, key_gen, bytes_written, bytes_read,
  344. sequence, num_ops, read_hits) {
  345. threads_done_ = threads_done;
  346. }
  347. void operator()() override {
  348. for (unsigned int i = 0; i < num_ops_; ++i) {
  349. ReadOneSeq();
  350. }
  351. ++*threads_done_;
  352. }
  353. private:
  354. std::atomic_int* threads_done_;
  355. };
  356. class Benchmark {
  357. public:
  358. explicit Benchmark(MemTableRep* table, KeyGenerator* key_gen,
  359. uint64_t* sequence, uint32_t num_threads)
  360. : table_(table),
  361. key_gen_(key_gen),
  362. sequence_(sequence),
  363. num_threads_(num_threads) {}
  364. virtual ~Benchmark() {}
  365. virtual void Run() {
  366. std::cout << "Number of threads: " << num_threads_ << std::endl;
  367. std::vector<port::Thread> threads;
  368. uint64_t bytes_written = 0;
  369. uint64_t bytes_read = 0;
  370. uint64_t read_hits = 0;
  371. StopWatchNano timer(SystemClock::Default().get(), true);
  372. RunThreads(&threads, &bytes_written, &bytes_read, true, &read_hits);
  373. auto elapsed_time = static_cast<double>(timer.ElapsedNanos() / 1000);
  374. std::cout << "Elapsed time: " << static_cast<int>(elapsed_time) << " us"
  375. << std::endl;
  376. if (bytes_written > 0) {
  377. auto MiB_written = static_cast<double>(bytes_written) / (1 << 20);
  378. auto write_throughput = MiB_written / (elapsed_time / 1000000);
  379. std::cout << "Total bytes written: " << MiB_written << " MiB"
  380. << std::endl;
  381. std::cout << "Write throughput: " << write_throughput << " MiB/s"
  382. << std::endl;
  383. auto us_per_op = elapsed_time / num_write_ops_per_thread_;
  384. std::cout << "write us/op: " << us_per_op << std::endl;
  385. }
  386. if (bytes_read > 0) {
  387. auto MiB_read = static_cast<double>(bytes_read) / (1 << 20);
  388. auto read_throughput = MiB_read / (elapsed_time / 1000000);
  389. std::cout << "Total bytes read: " << MiB_read << " MiB" << std::endl;
  390. std::cout << "Read throughput: " << read_throughput << " MiB/s"
  391. << std::endl;
  392. auto us_per_op = elapsed_time / num_read_ops_per_thread_;
  393. std::cout << "read us/op: " << us_per_op << std::endl;
  394. }
  395. }
  396. virtual void RunThreads(std::vector<port::Thread>* threads,
  397. uint64_t* bytes_written, uint64_t* bytes_read,
  398. bool write, uint64_t* read_hits) = 0;
  399. protected:
  400. MemTableRep* table_;
  401. KeyGenerator* key_gen_;
  402. uint64_t* sequence_;
  403. uint64_t num_write_ops_per_thread_ = 0;
  404. uint64_t num_read_ops_per_thread_ = 0;
  405. const uint32_t num_threads_;
  406. };
  407. class FillBenchmark : public Benchmark {
  408. public:
  409. explicit FillBenchmark(MemTableRep* table, KeyGenerator* key_gen,
  410. uint64_t* sequence)
  411. : Benchmark(table, key_gen, sequence, 1) {
  412. num_write_ops_per_thread_ = FLAGS_num_operations;
  413. }
  414. void RunThreads(std::vector<port::Thread>* /*threads*/,
  415. uint64_t* bytes_written, uint64_t* bytes_read, bool /*write*/,
  416. uint64_t* read_hits) override {
  417. FillBenchmarkThread(table_, key_gen_, bytes_written, bytes_read, sequence_,
  418. num_write_ops_per_thread_, read_hits)();
  419. }
  420. };
  421. class ReadBenchmark : public Benchmark {
  422. public:
  423. explicit ReadBenchmark(MemTableRep* table, KeyGenerator* key_gen,
  424. uint64_t* sequence)
  425. : Benchmark(table, key_gen, sequence, FLAGS_num_threads) {
  426. num_read_ops_per_thread_ = FLAGS_num_operations / FLAGS_num_threads;
  427. }
  428. void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
  429. uint64_t* bytes_read, bool /*write*/,
  430. uint64_t* read_hits) override {
  431. for (int i = 0; i < FLAGS_num_threads; ++i) {
  432. threads->emplace_back(
  433. ReadBenchmarkThread(table_, key_gen_, bytes_written, bytes_read,
  434. sequence_, num_read_ops_per_thread_, read_hits));
  435. }
  436. for (auto& thread : *threads) {
  437. thread.join();
  438. }
  439. std::cout << "read hit%: "
  440. << (static_cast<double>(*read_hits) / FLAGS_num_operations) * 100
  441. << std::endl;
  442. }
  443. };
  444. class SeqReadBenchmark : public Benchmark {
  445. public:
  446. explicit SeqReadBenchmark(MemTableRep* table, uint64_t* sequence)
  447. : Benchmark(table, nullptr, sequence, FLAGS_num_threads) {
  448. num_read_ops_per_thread_ = FLAGS_num_scans;
  449. }
  450. void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
  451. uint64_t* bytes_read, bool /*write*/,
  452. uint64_t* read_hits) override {
  453. for (int i = 0; i < FLAGS_num_threads; ++i) {
  454. threads->emplace_back(SeqReadBenchmarkThread(
  455. table_, key_gen_, bytes_written, bytes_read, sequence_,
  456. num_read_ops_per_thread_, read_hits));
  457. }
  458. for (auto& thread : *threads) {
  459. thread.join();
  460. }
  461. }
  462. };
  463. template <class ReadThreadType>
  464. class ReadWriteBenchmark : public Benchmark {
  465. public:
  466. explicit ReadWriteBenchmark(MemTableRep* table, KeyGenerator* key_gen,
  467. uint64_t* sequence)
  468. : Benchmark(table, key_gen, sequence, FLAGS_num_threads) {
  469. num_read_ops_per_thread_ =
  470. FLAGS_num_threads <= 1
  471. ? 0
  472. : (FLAGS_num_operations / (FLAGS_num_threads - 1));
  473. num_write_ops_per_thread_ = FLAGS_num_operations;
  474. }
  475. void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
  476. uint64_t* bytes_read, bool /*write*/,
  477. uint64_t* read_hits) override {
  478. std::atomic_int threads_done;
  479. threads_done.store(0);
  480. threads->emplace_back(ConcurrentFillBenchmarkThread(
  481. table_, key_gen_, bytes_written, bytes_read, sequence_,
  482. num_write_ops_per_thread_, read_hits, &threads_done));
  483. for (int i = 1; i < FLAGS_num_threads; ++i) {
  484. threads->emplace_back(
  485. ReadThreadType(table_, key_gen_, bytes_written, bytes_read, sequence_,
  486. num_read_ops_per_thread_, read_hits, &threads_done));
  487. }
  488. for (auto& thread : *threads) {
  489. thread.join();
  490. }
  491. }
  492. };
  493. } // namespace ROCKSDB_NAMESPACE
// Prints build-configuration warnings (no optimization / assertions on)
// that would make the benchmark numbers unrepresentative.
void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
  fprintf(stdout,
          "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
  fprintf(stdout,
          "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif
}
  504. int main(int argc, char** argv) {
  505. ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  506. SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
  507. " [OPTIONS]...");
  508. ParseCommandLineFlags(&argc, &argv, true);
  509. PrintWarnings();
  510. ROCKSDB_NAMESPACE::Options options;
  511. std::unique_ptr<ROCKSDB_NAMESPACE::MemTableRepFactory> factory;
  512. if (FLAGS_memtablerep == "skiplist") {
  513. factory.reset(new ROCKSDB_NAMESPACE::SkipListFactory);
  514. } else if (FLAGS_memtablerep == "vector") {
  515. factory.reset(new ROCKSDB_NAMESPACE::VectorRepFactory);
  516. } else if (FLAGS_memtablerep == "hashskiplist" ||
  517. FLAGS_memtablerep == "prefix_hash") {
  518. factory.reset(ROCKSDB_NAMESPACE::NewHashSkipListRepFactory(
  519. FLAGS_bucket_count, FLAGS_hashskiplist_height,
  520. FLAGS_hashskiplist_branching_factor));
  521. options.prefix_extractor.reset(
  522. ROCKSDB_NAMESPACE::NewFixedPrefixTransform(FLAGS_prefix_length));
  523. } else if (FLAGS_memtablerep == "hashlinklist" ||
  524. FLAGS_memtablerep == "hash_linkedlist") {
  525. factory.reset(ROCKSDB_NAMESPACE::NewHashLinkListRepFactory(
  526. FLAGS_bucket_count, FLAGS_huge_page_tlb_size,
  527. FLAGS_bucket_entries_logging_threshold,
  528. FLAGS_if_log_bucket_dist_when_flash, FLAGS_threshold_use_skiplist));
  529. options.prefix_extractor.reset(
  530. ROCKSDB_NAMESPACE::NewFixedPrefixTransform(FLAGS_prefix_length));
  531. } else {
  532. ROCKSDB_NAMESPACE::ConfigOptions config_options;
  533. config_options.ignore_unsupported_options = false;
  534. ROCKSDB_NAMESPACE::Status s =
  535. ROCKSDB_NAMESPACE::MemTableRepFactory::CreateFromString(
  536. config_options, FLAGS_memtablerep, &factory);
  537. if (!s.ok()) {
  538. fprintf(stdout, "Unknown memtablerep: %s\n", s.ToString().c_str());
  539. exit(1);
  540. }
  541. }
  542. ROCKSDB_NAMESPACE::InternalKeyComparator internal_key_comp(
  543. ROCKSDB_NAMESPACE::BytewiseComparator());
  544. ROCKSDB_NAMESPACE::MemTable::KeyComparator key_comp(internal_key_comp);
  545. ROCKSDB_NAMESPACE::Arena arena;
  546. ROCKSDB_NAMESPACE::WriteBufferManager wb(FLAGS_write_buffer_size);
  547. uint64_t sequence;
  548. auto createMemtableRep = [&] {
  549. sequence = 0;
  550. return factory->CreateMemTableRep(key_comp, &arena,
  551. options.prefix_extractor.get(),
  552. options.info_log.get());
  553. };
  554. std::unique_ptr<ROCKSDB_NAMESPACE::MemTableRep> memtablerep;
  555. ROCKSDB_NAMESPACE::Random64 rng(FLAGS_seed);
  556. const char* benchmarks = FLAGS_benchmarks.c_str();
  557. while (benchmarks != nullptr) {
  558. std::unique_ptr<ROCKSDB_NAMESPACE::KeyGenerator> key_gen;
  559. const char* sep = strchr(benchmarks, ',');
  560. ROCKSDB_NAMESPACE::Slice name;
  561. if (sep == nullptr) {
  562. name = benchmarks;
  563. benchmarks = nullptr;
  564. } else {
  565. name = ROCKSDB_NAMESPACE::Slice(benchmarks, sep - benchmarks);
  566. benchmarks = sep + 1;
  567. }
  568. std::unique_ptr<ROCKSDB_NAMESPACE::Benchmark> benchmark;
  569. if (name == ROCKSDB_NAMESPACE::Slice("fillseq")) {
  570. memtablerep.reset(createMemtableRep());
  571. key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
  572. &rng, ROCKSDB_NAMESPACE::SEQUENTIAL, FLAGS_num_operations));
  573. benchmark.reset(new ROCKSDB_NAMESPACE::FillBenchmark(
  574. memtablerep.get(), key_gen.get(), &sequence));
  575. } else if (name == ROCKSDB_NAMESPACE::Slice("fillrandom")) {
  576. memtablerep.reset(createMemtableRep());
  577. key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
  578. &rng, ROCKSDB_NAMESPACE::UNIQUE_RANDOM, FLAGS_num_operations));
  579. benchmark.reset(new ROCKSDB_NAMESPACE::FillBenchmark(
  580. memtablerep.get(), key_gen.get(), &sequence));
  581. } else if (name == ROCKSDB_NAMESPACE::Slice("readrandom")) {
  582. key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
  583. &rng, ROCKSDB_NAMESPACE::RANDOM, FLAGS_num_operations));
  584. benchmark.reset(new ROCKSDB_NAMESPACE::ReadBenchmark(
  585. memtablerep.get(), key_gen.get(), &sequence));
  586. } else if (name == ROCKSDB_NAMESPACE::Slice("readseq")) {
  587. key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
  588. &rng, ROCKSDB_NAMESPACE::SEQUENTIAL, FLAGS_num_operations));
  589. benchmark.reset(new ROCKSDB_NAMESPACE::SeqReadBenchmark(memtablerep.get(),
  590. &sequence));
  591. } else if (name == ROCKSDB_NAMESPACE::Slice("readwrite")) {
  592. memtablerep.reset(createMemtableRep());
  593. key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
  594. &rng, ROCKSDB_NAMESPACE::RANDOM, FLAGS_num_operations));
  595. benchmark.reset(new ROCKSDB_NAMESPACE::ReadWriteBenchmark<
  596. ROCKSDB_NAMESPACE::ConcurrentReadBenchmarkThread>(
  597. memtablerep.get(), key_gen.get(), &sequence));
  598. } else if (name == ROCKSDB_NAMESPACE::Slice("seqreadwrite")) {
  599. memtablerep.reset(createMemtableRep());
  600. key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
  601. &rng, ROCKSDB_NAMESPACE::RANDOM, FLAGS_num_operations));
  602. benchmark.reset(new ROCKSDB_NAMESPACE::ReadWriteBenchmark<
  603. ROCKSDB_NAMESPACE::SeqConcurrentReadBenchmarkThread>(
  604. memtablerep.get(), key_gen.get(), &sequence));
  605. } else {
  606. std::cout << "WARNING: skipping unknown benchmark '" << name.ToString()
  607. << std::endl;
  608. continue;
  609. }
  610. std::cout << "Running " << name.ToString() << std::endl;
  611. benchmark->Run();
  612. }
  613. return 0;
  614. }
  615. #endif // GFLAGS