memtablerep_bench.cc

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef GFLAGS
#include <cstdio>
int main() {
  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
  return 1;
}
#else

#include <algorithm>
#include <atomic>
#include <cstring>
#include <iostream>
#include <limits>
#include <memory>
#include <random>
#include <thread>
#include <type_traits>
#include <vector>

#include "db/dbformat.h"
#include "db/memtable.h"
#include "memory/arena.h"
#include "port/port.h"
#include "port/stack_trace.h"
#include "rocksdb/comparator.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/write_buffer_manager.h"
#include "test_util/testutil.h"
#include "util/gflags_compat.h"
#include "util/mutexlock.h"
#include "util/stop_watch.h"

using GFLAGS_NAMESPACE::ParseCommandLineFlags;
using GFLAGS_NAMESPACE::RegisterFlagValidator;
using GFLAGS_NAMESPACE::SetUsageMessage;

DEFINE_string(benchmarks, "fillrandom",
              "Comma-separated list of benchmarks to run. Options:\n"
              "\tfillrandom -- write N random values\n"
              "\tfillseq -- write N values in sequential order\n"
              "\treadrandom -- read N values in random order\n"
              "\treadseq -- scan the DB\n"
              "\treadwrite -- 1 thread writes while N - 1 threads "
              "do random reads\n"
              "\tseqreadwrite -- 1 thread writes while N - 1 threads "
              "do scans\n");
DEFINE_string(memtablerep, "skiplist",
              "Which implementation of memtablerep to use. See "
              "include/memtablerep.h for\n"
              "  more details. Options:\n"
              "\tskiplist -- backed by a skiplist\n"
              "\tvector -- backed by an std::vector\n"
              "\thashskiplist -- backed by a hash skip list\n"
              "\thashlinklist -- backed by a hash linked list\n"
              "\tcuckoo -- backed by a cuckoo hash table");
DEFINE_int64(bucket_count, 1000000,
             "bucket_count parameter to pass into NewHashSkiplistRepFactory "
             "or NewHashLinkListRepFactory");
DEFINE_int32(
    hashskiplist_height, 4,
    "skiplist_height parameter to pass into NewHashSkiplistRepFactory");
DEFINE_int32(
    hashskiplist_branching_factor, 4,
    "branching_factor parameter to pass into NewHashSkiplistRepFactory");
DEFINE_int32(
    huge_page_tlb_size, 0,
    "huge_page_tlb_size parameter to pass into NewHashLinkListRepFactory");
DEFINE_int32(bucket_entries_logging_threshold, 4096,
             "bucket_entries_logging_threshold parameter to pass into "
             "NewHashLinkListRepFactory");
DEFINE_bool(if_log_bucket_dist_when_flash, true,
            "if_log_bucket_dist_when_flash parameter to pass into "
            "NewHashLinkListRepFactory");
DEFINE_int32(
    threshold_use_skiplist, 256,
    "threshold_use_skiplist parameter to pass into NewHashLinkListRepFactory");
DEFINE_int64(write_buffer_size, 256,
             "write_buffer_size parameter to pass into WriteBufferManager");
DEFINE_int32(
    num_threads, 1,
    "Number of concurrent threads to run. If the benchmark includes writes,\n"
    "then at most one thread will be a writer");
DEFINE_int32(num_operations, 1000000,
             "Number of operations to do for write and random read benchmarks");
DEFINE_int32(num_scans, 10,
             "Number of times for each thread to scan the memtablerep for "
             "sequential read benchmarks");
DEFINE_int32(item_size, 100, "Number of bytes each item should be");
DEFINE_int32(prefix_length, 8,
             "Prefix length to pass into NewFixedPrefixTransform");

/* VectorRep settings */
DEFINE_int64(vectorrep_count, 0,
             "Number of entries to reserve on VectorRep initialization");

DEFINE_int64(seed, 0,
             "Seed base for random number generators. "
             "When 0 it is deterministic.");
namespace ROCKSDB_NAMESPACE {

namespace {
struct CallbackVerifyArgs {
  bool found;
  LookupKey* key;
  MemTableRep* table;
  InternalKeyComparator* comparator;
};
}  // namespace

// Helper for quickly generating random data.
class RandomGenerator {
 private:
  std::string data_;
  unsigned int pos_;

 public:
  RandomGenerator() {
    Random rnd(301);
    auto size = (unsigned)std::max(1048576, FLAGS_item_size);
    test::RandomString(&rnd, size, &data_);
    pos_ = 0;
  }

  Slice Generate(unsigned int len) {
    assert(len <= data_.size());
    if (pos_ + len > data_.size()) {
      pos_ = 0;
    }
    pos_ += len;
    return Slice(data_.data() + pos_ - len, len);
  }
};

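// Key generation modes: SEQUENTIAL hands out 0, 1, 2, ...; RANDOM draws
// uniformly (with repeats) from [0, num); UNIQUE_RANDOM pre-shuffles the
// full key range so every key is produced exactly once, in random order.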
enum WriteMode { SEQUENTIAL, RANDOM, UNIQUE_RANDOM };

class KeyGenerator {
 public:
  KeyGenerator(Random64* rand, WriteMode mode, uint64_t num)
      : rand_(rand), mode_(mode), num_(num), next_(0) {
    if (mode_ == UNIQUE_RANDOM) {
      // NOTE: if memory consumption of this approach becomes a concern,
      // we can break it into pieces and only shuffle one section at a time,
      // or use a bit map implementation
      // (https://reviews.facebook.net/differential/diff/54627/)
      values_.resize(num_);
      for (uint64_t i = 0; i < num_; ++i) {
        values_[i] = i;
      }
      std::shuffle(
          values_.begin(), values_.end(),
          std::default_random_engine(static_cast<unsigned int>(FLAGS_seed)));
    }
  }

  uint64_t Next() {
    switch (mode_) {
      case SEQUENTIAL:
        return next_++;
      case RANDOM:
        return rand_->Next() % num_;
      case UNIQUE_RANDOM:
        return values_[next_++];
    }
    assert(false);
    return std::numeric_limits<uint64_t>::max();
  }

 private:
  Random64* rand_;
  WriteMode mode_;
  const uint64_t num_;
  uint64_t next_;
  std::vector<uint64_t> values_;
};

class BenchmarkThread {
 public:
  explicit BenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                           uint64_t* bytes_written, uint64_t* bytes_read,
                           uint64_t* sequence, uint64_t num_ops,
                           uint64_t* read_hits)
      : table_(table),
        key_gen_(key_gen),
        bytes_written_(bytes_written),
        bytes_read_(bytes_read),
        sequence_(sequence),
        num_ops_(num_ops),
        read_hits_(read_hits) {}

  virtual void operator()() = 0;
  virtual ~BenchmarkThread() {}

 protected:
  MemTableRep* table_;
  KeyGenerator* key_gen_;
  uint64_t* bytes_written_;
  uint64_t* bytes_read_;
  uint64_t* sequence_;
  uint64_t num_ops_;
  uint64_t* read_hits_;
  RandomGenerator generator_;
};

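// Each entry written by FillOne() below uses a simplified version of the
// MemTable entry format: a varint32 length of the 16-byte internal key,
// followed by an 8-byte fixed-width user key, an 8-byte sequence number,
// and FLAGS_item_size bytes of value data (the value length is implicit).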
class FillBenchmarkThread : public BenchmarkThread {
 public:
  FillBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                      uint64_t* bytes_written, uint64_t* bytes_read,
                      uint64_t* sequence, uint64_t num_ops, uint64_t* read_hits)
      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                        num_ops, read_hits) {}

  void FillOne() {
    char* buf = nullptr;
    auto internal_key_size = 16;
    auto encoded_len =
        FLAGS_item_size + VarintLength(internal_key_size) + internal_key_size;
    KeyHandle handle = table_->Allocate(encoded_len, &buf);
    assert(buf != nullptr);
    char* p = EncodeVarint32(buf, internal_key_size);
    auto key = key_gen_->Next();
    EncodeFixed64(p, key);
    p += 8;
    EncodeFixed64(p, ++(*sequence_));
    p += 8;
    Slice bytes = generator_.Generate(FLAGS_item_size);
    memcpy(p, bytes.data(), FLAGS_item_size);
    p += FLAGS_item_size;
    assert(p == buf + encoded_len);
    table_->Insert(handle);
    *bytes_written_ += encoded_len;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      FillOne();
    }
  }
};

class ConcurrentFillBenchmarkThread : public FillBenchmarkThread {
 public:
  ConcurrentFillBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                                uint64_t* bytes_written, uint64_t* bytes_read,
                                uint64_t* sequence, uint64_t num_ops,
                                uint64_t* read_hits,
                                std::atomic_int* threads_done)
      : FillBenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                            num_ops, read_hits) {
    threads_done_ = threads_done;
  }

  void operator()() override {
    // The number of read threads is the total thread count minus the single
    // writer. Keep writing until all readers have finished.
    while ((*threads_done_).load() < (FLAGS_num_threads - 1)) {
      FillOne();
    }
  }

 private:
  std::atomic_int* threads_done_;
};

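// ReadBenchmarkThread::callback is invoked by MemTableRep::Get() for entries
// at or after the lookup key; returning false stops the iteration, so only
// the first candidate entry is compared against the target user key.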
class ReadBenchmarkThread : public BenchmarkThread {
 public:
  ReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                      uint64_t* bytes_written, uint64_t* bytes_read,
                      uint64_t* sequence, uint64_t num_ops, uint64_t* read_hits)
      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                        num_ops, read_hits) {}

  static bool callback(void* arg, const char* entry) {
    CallbackVerifyArgs* callback_args = static_cast<CallbackVerifyArgs*>(arg);
    assert(callback_args != nullptr);
    uint32_t key_length;
    const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
    if ((callback_args->comparator)
            ->user_comparator()
            ->Equal(Slice(key_ptr, key_length - 8),
                    callback_args->key->user_key())) {
      callback_args->found = true;
    }
    return false;
  }

  void ReadOne() {
    std::string user_key;
    auto key = key_gen_->Next();
    PutFixed64(&user_key, key);
    LookupKey lookup_key(user_key, *sequence_);
    InternalKeyComparator internal_key_comp(BytewiseComparator());
    CallbackVerifyArgs verify_args;
    verify_args.found = false;
    verify_args.key = &lookup_key;
    verify_args.table = table_;
    verify_args.comparator = &internal_key_comp;
    table_->Get(lookup_key, &verify_args, callback);
    if (verify_args.found) {
      *bytes_read_ += VarintLength(16) + 16 + FLAGS_item_size;
      ++*read_hits_;
    }
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOne();
    }
  }
};

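// Sequential readers scan the whole memtable once per ReadOneSeq() call;
// here read_hits_ counts completed scans rather than individual key matches.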
class SeqReadBenchmarkThread : public BenchmarkThread {
 public:
  SeqReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                         uint64_t* bytes_written, uint64_t* bytes_read,
                         uint64_t* sequence, uint64_t num_ops,
                         uint64_t* read_hits)
      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                        num_ops, read_hits) {}

  void ReadOneSeq() {
    std::unique_ptr<MemTableRep::Iterator> iter(table_->GetIterator());
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      // pretend to read the value
      *bytes_read_ += VarintLength(16) + 16 + FLAGS_item_size;
    }
    ++*read_hits_;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOneSeq();
    }
  }
};

class ConcurrentReadBenchmarkThread : public ReadBenchmarkThread {
 public:
  ConcurrentReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                                uint64_t* bytes_written, uint64_t* bytes_read,
                                uint64_t* sequence, uint64_t num_ops,
                                uint64_t* read_hits,
                                std::atomic_int* threads_done)
      : ReadBenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                            num_ops, read_hits) {
    threads_done_ = threads_done;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOne();
    }
    ++*threads_done_;
  }

 private:
  std::atomic_int* threads_done_;
};

class SeqConcurrentReadBenchmarkThread : public SeqReadBenchmarkThread {
 public:
  SeqConcurrentReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                                   uint64_t* bytes_written,
                                   uint64_t* bytes_read, uint64_t* sequence,
                                   uint64_t num_ops, uint64_t* read_hits,
                                   std::atomic_int* threads_done)
      : SeqReadBenchmarkThread(table, key_gen, bytes_written, bytes_read,
                               sequence, num_ops, read_hits) {
    threads_done_ = threads_done;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOneSeq();
    }
    ++*threads_done_;
  }

 private:
  std::atomic_int* threads_done_;
};

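// Benchmark drives one run: it spawns the worker threads, times them with
// StopWatchNano, and reports throughput (MiB/s) and per-operation latency
// (us/op) for whichever of the read and write sides did any work.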
class Benchmark {
 public:
  explicit Benchmark(MemTableRep* table, KeyGenerator* key_gen,
                     uint64_t* sequence, uint32_t num_threads)
      : table_(table),
        key_gen_(key_gen),
        sequence_(sequence),
        num_threads_(num_threads) {}

  virtual ~Benchmark() {}

  virtual void Run() {
    std::cout << "Number of threads: " << num_threads_ << std::endl;
    std::vector<port::Thread> threads;
    uint64_t bytes_written = 0;
    uint64_t bytes_read = 0;
    uint64_t read_hits = 0;
    StopWatchNano timer(Env::Default(), true);
    RunThreads(&threads, &bytes_written, &bytes_read, true, &read_hits);
    auto elapsed_time = static_cast<double>(timer.ElapsedNanos() / 1000);
    std::cout << "Elapsed time: " << static_cast<int>(elapsed_time) << " us"
              << std::endl;

    if (bytes_written > 0) {
      auto MiB_written = static_cast<double>(bytes_written) / (1 << 20);
      auto write_throughput = MiB_written / (elapsed_time / 1000000);
      std::cout << "Total bytes written: " << MiB_written << " MiB"
                << std::endl;
      std::cout << "Write throughput: " << write_throughput << " MiB/s"
                << std::endl;
      auto us_per_op = elapsed_time / num_write_ops_per_thread_;
      std::cout << "write us/op: " << us_per_op << std::endl;
    }
    if (bytes_read > 0) {
      auto MiB_read = static_cast<double>(bytes_read) / (1 << 20);
      auto read_throughput = MiB_read / (elapsed_time / 1000000);
      std::cout << "Total bytes read: " << MiB_read << " MiB" << std::endl;
      std::cout << "Read throughput: " << read_throughput << " MiB/s"
                << std::endl;
      auto us_per_op = elapsed_time / num_read_ops_per_thread_;
      std::cout << "read us/op: " << us_per_op << std::endl;
    }
  }

  virtual void RunThreads(std::vector<port::Thread>* threads,
                          uint64_t* bytes_written, uint64_t* bytes_read,
                          bool write, uint64_t* read_hits) = 0;

 protected:
  MemTableRep* table_;
  KeyGenerator* key_gen_;
  uint64_t* sequence_;
  uint64_t num_write_ops_per_thread_;
  uint64_t num_read_ops_per_thread_;
  const uint32_t num_threads_;
};

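// FillBenchmark always runs a single writer inline, so the fillseq and
// fillrandom results are not affected by --num_threads.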
class FillBenchmark : public Benchmark {
 public:
  explicit FillBenchmark(MemTableRep* table, KeyGenerator* key_gen,
                         uint64_t* sequence)
      : Benchmark(table, key_gen, sequence, 1) {
    num_write_ops_per_thread_ = FLAGS_num_operations;
  }

  void RunThreads(std::vector<port::Thread>* /*threads*/,
                  uint64_t* bytes_written, uint64_t* bytes_read,
                  bool /*write*/, uint64_t* read_hits) override {
    FillBenchmarkThread(table_, key_gen_, bytes_written, bytes_read, sequence_,
                        num_write_ops_per_thread_, read_hits)();
  }
};

class ReadBenchmark : public Benchmark {
 public:
  explicit ReadBenchmark(MemTableRep* table, KeyGenerator* key_gen,
                         uint64_t* sequence)
      : Benchmark(table, key_gen, sequence, FLAGS_num_threads) {
    num_read_ops_per_thread_ = FLAGS_num_operations / FLAGS_num_threads;
  }

  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
                  uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    for (int i = 0; i < FLAGS_num_threads; ++i) {
      threads->emplace_back(
          ReadBenchmarkThread(table_, key_gen_, bytes_written, bytes_read,
                              sequence_, num_read_ops_per_thread_, read_hits));
    }
    for (auto& thread : *threads) {
      thread.join();
    }
    std::cout << "read hit%: "
              << (static_cast<double>(*read_hits) / FLAGS_num_operations) * 100
              << std::endl;
  }
};

class SeqReadBenchmark : public Benchmark {
 public:
  explicit SeqReadBenchmark(MemTableRep* table, uint64_t* sequence)
      : Benchmark(table, nullptr, sequence, FLAGS_num_threads) {
    num_read_ops_per_thread_ = FLAGS_num_scans;
  }

  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
                  uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    for (int i = 0; i < FLAGS_num_threads; ++i) {
      threads->emplace_back(SeqReadBenchmarkThread(
          table_, key_gen_, bytes_written, bytes_read, sequence_,
          num_read_ops_per_thread_, read_hits));
    }
    for (auto& thread : *threads) {
      thread.join();
    }
  }
};

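// ReadWriteBenchmark runs one ConcurrentFillBenchmarkThread alongside
// N - 1 readers of ReadThreadType; the writer keeps inserting until every
// reader has incremented threads_done.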
template <class ReadThreadType>
class ReadWriteBenchmark : public Benchmark {
 public:
  explicit ReadWriteBenchmark(MemTableRep* table, KeyGenerator* key_gen,
                              uint64_t* sequence)
      : Benchmark(table, key_gen, sequence, FLAGS_num_threads) {
    num_read_ops_per_thread_ =
        FLAGS_num_threads <= 1
            ? 0
            : (FLAGS_num_operations / (FLAGS_num_threads - 1));
    num_write_ops_per_thread_ = FLAGS_num_operations;
  }

  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
                  uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    std::atomic_int threads_done;
    threads_done.store(0);
    threads->emplace_back(ConcurrentFillBenchmarkThread(
        table_, key_gen_, bytes_written, bytes_read, sequence_,
        num_write_ops_per_thread_, read_hits, &threads_done));
    for (int i = 1; i < FLAGS_num_threads; ++i) {
      threads->emplace_back(
          ReadThreadType(table_, key_gen_, bytes_written, bytes_read, sequence_,
                         num_read_ops_per_thread_, read_hits, &threads_done));
    }
    for (auto& thread : *threads) {
      thread.join();
    }
  }
};

}  // namespace ROCKSDB_NAMESPACE

void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
  fprintf(stdout,
          "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
  fprintf(stdout,
          "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif
}

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
                  " [OPTIONS]...");
  ParseCommandLineFlags(&argc, &argv, true);
  PrintWarnings();

  ROCKSDB_NAMESPACE::Options options;

  std::unique_ptr<ROCKSDB_NAMESPACE::MemTableRepFactory> factory;
  if (FLAGS_memtablerep == "skiplist") {
    factory.reset(new ROCKSDB_NAMESPACE::SkipListFactory);
#ifndef ROCKSDB_LITE
  } else if (FLAGS_memtablerep == "vector") {
    factory.reset(new ROCKSDB_NAMESPACE::VectorRepFactory);
  } else if (FLAGS_memtablerep == "hashskiplist") {
    factory.reset(ROCKSDB_NAMESPACE::NewHashSkipListRepFactory(
        FLAGS_bucket_count, FLAGS_hashskiplist_height,
        FLAGS_hashskiplist_branching_factor));
    options.prefix_extractor.reset(
        ROCKSDB_NAMESPACE::NewFixedPrefixTransform(FLAGS_prefix_length));
  } else if (FLAGS_memtablerep == "hashlinklist") {
    factory.reset(ROCKSDB_NAMESPACE::NewHashLinkListRepFactory(
        FLAGS_bucket_count, FLAGS_huge_page_tlb_size,
        FLAGS_bucket_entries_logging_threshold,
        FLAGS_if_log_bucket_dist_when_flash, FLAGS_threshold_use_skiplist));
    options.prefix_extractor.reset(
        ROCKSDB_NAMESPACE::NewFixedPrefixTransform(FLAGS_prefix_length));
#endif  // ROCKSDB_LITE
  } else {
    fprintf(stdout, "Unknown memtablerep: %s\n", FLAGS_memtablerep.c_str());
    exit(1);
  }

  ROCKSDB_NAMESPACE::InternalKeyComparator internal_key_comp(
      ROCKSDB_NAMESPACE::BytewiseComparator());
  ROCKSDB_NAMESPACE::MemTable::KeyComparator key_comp(internal_key_comp);
  ROCKSDB_NAMESPACE::Arena arena;
  ROCKSDB_NAMESPACE::WriteBufferManager wb(FLAGS_write_buffer_size);
  uint64_t sequence;
  auto createMemtableRep = [&] {
    sequence = 0;
    return factory->CreateMemTableRep(key_comp, &arena,
                                      options.prefix_extractor.get(),
                                      options.info_log.get());
  };
  std::unique_ptr<ROCKSDB_NAMESPACE::MemTableRep> memtablerep;
  ROCKSDB_NAMESPACE::Random64 rng(FLAGS_seed);
  const char* benchmarks = FLAGS_benchmarks.c_str();
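
  // FLAGS_benchmarks is parsed as a comma-separated list. Fill and
  // read-write benchmarks create a fresh memtable (resetting the sequence
  // number); the read-only benchmarks reuse the memtable left over from
  // the preceding fill benchmark.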
  while (benchmarks != nullptr) {
    std::unique_ptr<ROCKSDB_NAMESPACE::KeyGenerator> key_gen;
    const char* sep = strchr(benchmarks, ',');
    ROCKSDB_NAMESPACE::Slice name;
    if (sep == nullptr) {
      name = benchmarks;
      benchmarks = nullptr;
    } else {
      name = ROCKSDB_NAMESPACE::Slice(benchmarks, sep - benchmarks);
      benchmarks = sep + 1;
    }
    std::unique_ptr<ROCKSDB_NAMESPACE::Benchmark> benchmark;
    if (name == ROCKSDB_NAMESPACE::Slice("fillseq")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::SEQUENTIAL, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::FillBenchmark(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("fillrandom")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::UNIQUE_RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::FillBenchmark(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("readrandom")) {
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::ReadBenchmark(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("readseq")) {
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::SEQUENTIAL, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::SeqReadBenchmark(memtablerep.get(),
                                                              &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("readwrite")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::ReadWriteBenchmark<
                      ROCKSDB_NAMESPACE::ConcurrentReadBenchmarkThread>(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("seqreadwrite")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::ReadWriteBenchmark<
                      ROCKSDB_NAMESPACE::SeqConcurrentReadBenchmarkThread>(
          memtablerep.get(), key_gen.get(), &sequence));
    } else {
      std::cout << "WARNING: skipping unknown benchmark '" << name.ToString()
                << "'" << std::endl;
      continue;
    }
    std::cout << "Running " << name.ToString() << std::endl;
    benchmark->Run();
  }

  return 0;
}
#endif  // GFLAGS