#!/usr/bin/env bash
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

set -e

NUM=10000000
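# Usage: regression_build_test.sh [DATA_DIR [STAT_FILE]]
# Both arguments are optional and fall back to the mktemp defaults below,
# e.g. (paths are illustrative):
#   ./regression_build_test.sh /data/rocksdb_regress /data/rocksdb_stats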
if [ $# -eq 1 ]; then
  DATA_DIR=$1
elif [ $# -eq 2 ]; then
  DATA_DIR=$1
  STAT_FILE=$2
fi
# On the production build servers, put the data and stat files/directories
# somewhere other than /tmp, or else the tempdir-cleaning scripts will make
# you very unhappy.
DATA_DIR=${DATA_DIR:-$(mktemp -t -d rocksdb_XXXX)}
STAT_FILE=${STAT_FILE:-$(mktemp -t -u rocksdb_test_stats_XXXX)}
function cleanup {
  rm -rf "$DATA_DIR"
  rm -f "$STAT_FILE".fillseq
  rm -f "$STAT_FILE".readrandom
  rm -f "$STAT_FILE".overwrite
  rm -f "$STAT_FILE".memtablefillreadrandom
}
trap cleanup EXIT
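# The EXIT trap also fires when "set -e" aborts the script, so partial
# benchmark data is removed even if a run fails midway.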
if [ -z "$GIT_BRANCH" ]; then
  git_br=$(git rev-parse --abbrev-ref HEAD)
else
  git_br=$(basename "$GIT_BRANCH")
fi
if [ "$git_br" == "master" ]; then
  git_br=""
else
  git_br="."$git_br
fi
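# git_br becomes a ".<branch>" suffix on the ODS entity name (see
# send_to_ods below); master results report under the bare entity.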
make release
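# Most runs below pass --cache_size=6442450944, i.e. a 6 GiB block cache
# (6 * 1024^3 bytes); the "small block cache" run shrinks it to
# 104857600 bytes (100 MiB).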
# measure fillseq + fill up the DB for overwrite benchmark
./db_bench \
    --benchmarks=fillseq \
    --db=$DATA_DIR \
    --use_existing_db=0 \
    --bloom_bits=10 \
    --num=$NUM \
    --writes=$NUM \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 > ${STAT_FILE}.fillseq
# measure overwrite performance
./db_bench \
    --benchmarks=overwrite \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --writes=$((NUM / 10)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=8 > ${STAT_FILE}.overwrite
# fill up the db for readrandom benchmark (1GB total size)
./db_bench \
    --benchmarks=fillseq \
    --db=$DATA_DIR \
    --use_existing_db=0 \
    --bloom_bits=10 \
    --num=$NUM \
    --writes=$NUM \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=1 > /dev/null
# measure readrandom with 6GB block cache
./db_bench \
    --benchmarks=readrandom \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --reads=$((NUM / 5)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > ${STAT_FILE}.readrandom
# measure readrandom with 6GB block cache and tailing iterator
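# (a tailing iterator, ReadOptions::tailing, does not pin a snapshot and can
# see writes made after its creation; with --use_tailing_iterator=1 the
# point lookups go through iterator Seek()s instead of Get())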
./db_bench \
    --benchmarks=readrandom \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --reads=$((NUM / 5)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --use_tailing_iterator=1 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > ${STAT_FILE}.readrandomtailing
# measure readrandom with 100MB block cache
./db_bench \
    --benchmarks=readrandom \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --reads=$((NUM / 5)) \
    --cache_size=104857600 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > ${STAT_FILE}.readrandomsmallblockcache
# measure readrandom with 8k data in memtable
./db_bench \
    --benchmarks=overwrite,readrandom \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$NUM \
    --reads=$((NUM / 5)) \
    --writes=512 \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --write_buffer_size=1000000000 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > ${STAT_FILE}.readrandom_mem_sst
# fill up the db for readrandom benchmark with filluniquerandom (1GB total size)
./db_bench \
    --benchmarks=filluniquerandom \
    --db=$DATA_DIR \
    --use_existing_db=0 \
    --bloom_bits=10 \
    --num=$((NUM / 4)) \
    --writes=$((NUM / 4)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=1 > /dev/null
# dummy run whose only purpose is to compact the freshly loaded data
./db_bench \
    --benchmarks=readrandom \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$((NUM / 1000)) \
    --reads=$((NUM / 1000)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > /dev/null
# measure readrandom on the filluniquerandom-loaded DB with 6GB block cache
./db_bench \
    --benchmarks=readrandom \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$((NUM / 4)) \
    --reads=$((NUM / 4)) \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --disable_auto_compactions=1 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > ${STAT_FILE}.readrandom_filluniquerandom
# measure readwhilewriting on the filluniquerandom-loaded DB with 6GB block cache
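# (the background writer is throttled via --benchmark_write_rate_limit to
# 110 * 1024 = 112640, i.e. ~110 KiB/s assuming the limit is in bytes per
# second, so the run is read-dominated)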
./db_bench \
    --benchmarks=readwhilewriting \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --bloom_bits=10 \
    --num=$((NUM / 4)) \
    --reads=$((NUM / 4)) \
    --benchmark_write_rate_limit=$(( 110 * 1024 )) \
    --write_buffer_size=100000000 \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=16 > ${STAT_FILE}.readwhilewriting
# measure memtable performance -- none of the data gets flushed to disk
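# (1M keys with 10-byte values fit easily in the 1 GB write buffer, so the
# fill never flushes and the reads are served from the memtable)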
./db_bench \
    --benchmarks=fillrandom,readrandom \
    --db=$DATA_DIR \
    --use_existing_db=0 \
    --num=$((NUM / 10)) \
    --reads=$NUM \
    --cache_size=6442450944 \
    --cache_numshardbits=6 \
    --table_cache_numshardbits=4 \
    --write_buffer_size=1000000000 \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --value_size=10 \
    --threads=16 > ${STAT_FILE}.memtablefillreadrandom
common_in_mem_args="--db=/dev/shm/rocksdb \
    --num_levels=6 \
    --key_size=20 \
    --prefix_size=12 \
    --keys_per_prefix=10 \
    --value_size=100 \
    --compression_type=none \
    --compression_ratio=1 \
    --hard_rate_limit=2 \
    --write_buffer_size=134217728 \
    --max_write_buffer_number=4 \
    --level0_file_num_compaction_trigger=8 \
    --level0_slowdown_writes_trigger=16 \
    --level0_stop_writes_trigger=24 \
    --target_file_size_base=134217728 \
    --max_bytes_for_level_base=1073741824 \
    --disable_wal=0 \
    --wal_dir=/dev/shm/rocksdb \
    --sync=0 \
    --verify_checksum=1 \
    --delete_obsolete_files_period_micros=314572800 \
    --max_grandparent_overlap_factor=10 \
    --use_plain_table=1 \
    --open_files=-1 \
    --mmap_read=1 \
    --mmap_write=0 \
    --memtablerep=prefix_hash \
    --bloom_bits=10 \
    --bloom_locality=1 \
    --perf_level=0"
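# These shared flags keep the whole DB in memory: data and WAL live on
# /dev/shm, reads are mmap-ed, tables use the plain-table format, and the
# memtable uses the prefix_hash rep keyed on the 12-byte key prefix.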
# prepare an in-memory DB with 50M keys; total DB size is ~6GB
./db_bench \
    $common_in_mem_args \
    --statistics=0 \
    --max_background_compactions=16 \
    --max_background_flushes=16 \
    --benchmarks=filluniquerandom \
    --use_existing_db=0 \
    --num=52428800 \
    --threads=1 > /dev/null
# Readwhilewriting
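# (write rate capped at 9502720, about 9.1 MB/s assuming the limit is in
# bytes per second)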
./db_bench \
    $common_in_mem_args \
    --statistics=1 \
    --max_background_compactions=4 \
    --max_background_flushes=0 \
    --benchmarks=readwhilewriting \
    --use_existing_db=1 \
    --duration=600 \
    --threads=32 \
    --benchmark_write_rate_limit=9502720 > ${STAT_FILE}.readwhilewriting_in_ram
# Seekrandomwhilewriting
./db_bench \
    $common_in_mem_args \
    --statistics=1 \
    --max_background_compactions=4 \
    --max_background_flushes=0 \
    --benchmarks=seekrandomwhilewriting \
    --use_existing_db=1 \
    --use_tailing_iterator=1 \
    --duration=600 \
    --threads=32 \
    --benchmark_write_rate_limit=9502720 > ${STAT_FILE}.seekwhilewriting_in_ram
# measure fillseq with a large number of column families
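# (500 column families sharing small 1 MiB write buffers, which exercises
# per-CF memtable switching and flushing)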
./db_bench \
    --benchmarks=fillseq \
    --num_column_families=500 \
    --write_buffer_size=1048576 \
    --db=$DATA_DIR \
    --use_existing_db=0 \
    --num=$NUM \
    --writes=$NUM \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 > ${STAT_FILE}.fillseq_lots_column_families
# measure overwrite performance with a large number of column families
./db_bench \
    --benchmarks=overwrite \
    --num_column_families=500 \
    --write_buffer_size=1048576 \
    --db=$DATA_DIR \
    --use_existing_db=1 \
    --num=$NUM \
    --writes=$((NUM / 10)) \
    --open_files=55000 \
    --statistics=1 \
    --histogram=1 \
    --disable_wal=1 \
    --sync=0 \
    --threads=8 > ${STAT_FILE}.overwrite_lots_column_families
# send data to ODS
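# ODS is Facebook's internal time-series monitoring store; each key/value
# pair is posted to it through the intern endpoint below. On a devbox
# (no JENKINS_HOME) the values are just echoed instead.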
function send_to_ods {
  key="$1"
  value="$2"
  if [ -z "$JENKINS_HOME" ]; then
    # running on devbox, just print out the values
    echo "$key" "$value"
    return
  fi
  if [ -z "$value" ]; then
    echo >&2 "ERROR: Key $key doesn't have a value."
    return
  fi
  curl --silent "https://www.intern.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build$git_br&key=$key&value=$value" \
    --connect-timeout 60
}
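# db_bench reports each benchmark on a line like
#   readrandom : 2.301 micros/op 434534 ops/sec;
# and, with --histogram=1, a percentile line such as
#   Percentiles: P50: 1.20 P75: 2.10 P99: 4.80 ...
# The awk field positions below depend on that layout.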
function send_benchmark_to_ods {
  bench="$1"
  bench_key="$2"
  file="$3"
  QPS=$(grep "$bench" "$file" | awk '{print $5}')
  P50_MICROS=$(grep "$bench" "$file" -A 6 | grep "Percentiles" | awk '{print $3}')
  P75_MICROS=$(grep "$bench" "$file" -A 6 | grep "Percentiles" | awk '{print $5}')
  P99_MICROS=$(grep "$bench" "$file" -A 6 | grep "Percentiles" | awk '{print $7}')
  send_to_ods rocksdb.build.$bench_key.qps $QPS
  send_to_ods rocksdb.build.$bench_key.p50_micros $P50_MICROS
  send_to_ods rocksdb.build.$bench_key.p75_micros $P75_MICROS
  send_to_ods rocksdb.build.$bench_key.p99_micros $P99_MICROS
}
send_benchmark_to_ods overwrite overwrite ${STAT_FILE}.overwrite
send_benchmark_to_ods fillseq fillseq ${STAT_FILE}.fillseq
send_benchmark_to_ods readrandom readrandom ${STAT_FILE}.readrandom
send_benchmark_to_ods readrandom readrandom_tailing ${STAT_FILE}.readrandomtailing
send_benchmark_to_ods readrandom readrandom_smallblockcache ${STAT_FILE}.readrandomsmallblockcache
send_benchmark_to_ods readrandom readrandom_memtable_sst ${STAT_FILE}.readrandom_mem_sst
send_benchmark_to_ods readrandom readrandom_fillunique_random ${STAT_FILE}.readrandom_filluniquerandom
send_benchmark_to_ods fillrandom memtablefillrandom ${STAT_FILE}.memtablefillreadrandom
send_benchmark_to_ods readrandom memtablereadrandom ${STAT_FILE}.memtablefillreadrandom
send_benchmark_to_ods readwhilewriting readwhilewriting ${STAT_FILE}.readwhilewriting
send_benchmark_to_ods readwhilewriting readwhilewriting_in_ram ${STAT_FILE}.readwhilewriting_in_ram
send_benchmark_to_ods seekrandomwhilewriting seekwhilewriting_in_ram ${STAT_FILE}.seekwhilewriting_in_ram
send_benchmark_to_ods fillseq fillseq_lots_column_families ${STAT_FILE}.fillseq_lots_column_families
send_benchmark_to_ods overwrite overwrite_lots_column_families ${STAT_FILE}.overwrite_lots_column_families