#!/usr/bin/env bash
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# REQUIRE: benchmark_leveldb.sh exists in the current directory
# After execution of this script, log files are generated in $output_dir.
# report.txt provides high-level statistics.
#
# This should be used with the LevelDB fork listed here to use additional test options.
# For more details on the changes see the blog post listed below.
# https://github.com/mdcallag/leveldb-1
# http://smalldatum.blogspot.com/2015/04/comparing-leveldb-and-rocksdb-take-2.html
#
# This should be run from the parent of the tools directory. The command line is:
# [$env_vars] tools/run_leveldb.sh [list-of-threads]
#
# This runs a sequence of tests in the following sequence:
# step 1) load - bulkload, compact, fillseq, overwrite
# step 2) read-only for each number of threads
# step 3) read-write for each number of threads
#
# The list of threads is optional and when not set is equivalent to "24".
# Were list-of-threads specified as "1 2 4" then the tests in steps 2 and 3
# above would be repeated for 1, 2 and 4 threads. The tests in step 1 are
# only run for 1 thread.
# Test output is written to $OUTPUT_DIR, currently /tmp/output. The performance
# summary is in $OUTPUT_DIR/report.txt. There is one file in $OUTPUT_DIR per
# test and the tests are listed below.
#
# The environment variables are also optional. The variables are:
# NKEYS - number of key/value pairs to load
# NWRITESPERSEC - the writes/second rate limit for the *whilewriting* tests.
#     If this is too large then the non-writer threads can get starved.
# VAL_SIZE - the length of the value in the key/value pairs loaded.
#     You can estimate the size of the test database from this,
#     NKEYS and the compression rate (--compression_ratio) set
#     in tools/benchmark_leveldb.sh
# BLOCK_LENGTH - value for db_bench --block_size
# CACHE_BYTES - the size of the RocksDB block cache in bytes
# DATA_DIR - directory in which to create database files
# DO_SETUP - when set to 0 then a backup of the database is copied from
#     $DATA_DIR.bak to $DATA_DIR and the load tests from step 1 are skipped.
#     This allows tests from steps 2 and 3 to be repeated faster.
# SAVE_SETUP - saves a copy of the database at the end of step 1 to
#     $DATA_DIR.bak.
  45. # Size constants
  46. K=1024
  47. M=$((1024 * K))
  48. G=$((1024 * M))
  49. num_keys=${NKEYS:-$((1 * G))}
  50. wps=${NWRITESPERSEC:-$((10 * K))}
  51. vs=${VAL_SIZE:-400}
  52. cs=${CACHE_BYTES:-$(( 1 * G ))}
  53. bs=${BLOCK_LENGTH:-4096}
  54. # If no command line arguments then run for 24 threads.
  55. if [[ $# -eq 0 ]]; then
  56. nthreads=( 24 )
  57. else
  58. nthreads=( "$@" )
  59. fi
  60. for num_thr in "${nthreads[@]}" ; do
  61. echo Will run for $num_thr threads
  62. done
  63. # Update these parameters before execution !!!
  64. db_dir=${DATA_DIR:-"/tmp/rocksdb/"}
  65. do_setup=${DO_SETUP:-1}
  66. save_setup=${SAVE_SETUP:-0}
  67. output_dir="${TMPDIR:-/tmp}/output"
  68. ARGS="\
  69. OUTPUT_DIR=$output_dir \
  70. NUM_KEYS=$num_keys \
  71. DB_DIR=$db_dir \
  72. VALUE_SIZE=$vs \
  73. BLOCK_SIZE=$bs \
  74. CACHE_SIZE=$cs"
  75. mkdir -p $output_dir
  76. echo -e "ops/sec\tmb/sec\tusec/op\tavg\tp50\tTest" \
  77. > $output_dir/report.txt
  78. # Notes on test sequence:
  79. # step 1) Setup database via sequential fill followed by overwrite to fragment it.
  80. # Done without setting DURATION to make sure that overwrite does $num_keys writes
  81. # step 2) read-only tests for all levels of concurrency requested
  82. # step 3) non read-only tests for all levels of concurrency requested
  83. ###### Setup the database
  84. if [[ $do_setup != 0 ]]; then
  85. echo Doing setup
  86. # Test 2a: sequential fill with large values to get peak ingest
  87. # adjust NUM_KEYS given the use of larger values
  88. env $ARGS BLOCK_SIZE=$((1 * M)) VALUE_SIZE=$((32 * K)) NUM_KEYS=$(( num_keys / 64 )) \
  89. ./tools/benchmark_leveldb.sh fillseq
  90. # Test 2b: sequential fill with the configured value size
  91. env $ARGS ./tools/benchmark_leveldb.sh fillseq
  92. # Test 3: single-threaded overwrite
  93. env $ARGS NUM_THREADS=1 DB_BENCH_NO_SYNC=1 ./tools/benchmark_leveldb.sh overwrite
  94. else
  95. echo Restoring from backup
  96. rm -rf $db_dir
  97. if [ ! -d ${db_dir}.bak ]; then
  98. echo Database backup does not exist at ${db_dir}.bak
  99. exit -1
  100. fi
  101. echo Restore database from ${db_dir}.bak
  102. cp -p -r ${db_dir}.bak $db_dir
  103. fi
  104. if [[ $save_setup != 0 ]]; then
  105. echo Save database to ${db_dir}.bak
  106. cp -p -r $db_dir ${db_dir}.bak
  107. fi
  108. ###### Read-only tests
  109. for num_thr in "${nthreads[@]}" ; do
  110. # Test 4: random read
  111. env $ARGS NUM_THREADS=$num_thr ./tools/benchmark_leveldb.sh readrandom
  112. done
  113. ###### Non read-only tests
  114. for num_thr in "${nthreads[@]}" ; do
  115. # Test 7: overwrite with sync=0
  116. env $ARGS NUM_THREADS=$num_thr DB_BENCH_NO_SYNC=1 \
  117. ./tools/benchmark_leveldb.sh overwrite
  118. # Test 8: overwrite with sync=1
  119. # Not run for now because LevelDB db_bench doesn't have an option to limit the
  120. # test run to X seconds and doing sync-per-commit for --num can take too long.
  121. # env $ARGS NUM_THREADS=$num_thr ./tools/benchmark_leveldb.sh overwrite
  122. # Test 11: random read while writing
  123. env $ARGS NUM_THREADS=$num_thr WRITES_PER_SECOND=$wps \
  124. ./tools/benchmark_leveldb.sh readwhilewriting
  125. done
  126. echo bulkload > $output_dir/report2.txt
  127. head -1 $output_dir/report.txt >> $output_dir/report2.txt
  128. grep bulkload $output_dir/report.txt >> $output_dir/report2.txt
  129. echo fillseq >> $output_dir/report2.txt
  130. head -1 $output_dir/report.txt >> $output_dir/report2.txt
  131. grep fillseq $output_dir/report.txt >> $output_dir/report2.txt
  132. echo overwrite sync=0 >> $output_dir/report2.txt
  133. head -1 $output_dir/report.txt >> $output_dir/report2.txt
  134. grep overwrite $output_dir/report.txt | grep \.s0 >> $output_dir/report2.txt
  135. echo overwrite sync=1 >> $output_dir/report2.txt
  136. head -1 $output_dir/report.txt >> $output_dir/report2.txt
  137. grep overwrite $output_dir/report.txt | grep \.s1 >> $output_dir/report2.txt
  138. echo readrandom >> $output_dir/report2.txt
  139. head -1 $output_dir/report.txt >> $output_dir/report2.txt
  140. grep readrandom $output_dir/report.txt >> $output_dir/report2.txt
  141. echo readwhile >> $output_dir/report2.txt >> $output_dir/report2.txt
  142. head -1 $output_dir/report.txt >> $output_dir/report2.txt
  143. grep readwhilewriting $output_dir/report.txt >> $output_dir/report2.txt
  144. cat $output_dir/report2.txt