write_buffer_manager_test.cc

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "rocksdb/write_buffer_manager.h"

#include "rocksdb/advanced_cache.h"
#include "test_util/testharness.h"

namespace ROCKSDB_NAMESPACE {

class WriteBufferManagerTest : public testing::Test {};

const size_t kSizeDummyEntry = 256 * 1024;
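
// Note on the cache-charging scheme exercised by ChargeWriteBufferTest below
// (a summary inferred from the assertions and comments in this file, not an
// authoritative spec): when a Cache is passed to WriteBufferManager, reserved
// memory is mirrored in the cache as pinned "dummy" entries of kSizeDummyEntry
// (256KB) each. On ReserveMem the charge grows by roughly
//   ceil((memory_used_ - dummy_entries_in_cache_usage) / kSizeDummyEntry)
// entries, and on FreeMem entries are released lazily, only once memory_used_
// drops below 3/4 of dummy_entries_in_cache_usage.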

TEST_F(WriteBufferManagerTest, ShouldFlush) {
  // A write buffer manager of size 10MB
  std::unique_ptr<WriteBufferManager> wbf(
      new WriteBufferManager(10 * 1024 * 1024));

  wbf->ReserveMem(8 * 1024 * 1024);
  ASSERT_FALSE(wbf->ShouldFlush());
  // 90% of the hard limit will hit the condition
  wbf->ReserveMem(1 * 1024 * 1024);
  ASSERT_TRUE(wbf->ShouldFlush());
  // Scheduling for freeing will release the condition
  wbf->ScheduleFreeMem(1 * 1024 * 1024);
  ASSERT_FALSE(wbf->ShouldFlush());

  wbf->ReserveMem(2 * 1024 * 1024);
  ASSERT_TRUE(wbf->ShouldFlush());

  wbf->ScheduleFreeMem(4 * 1024 * 1024);
  // 11MB total, 6MB mutable. Hard limit still hit
  ASSERT_TRUE(wbf->ShouldFlush());

  wbf->ScheduleFreeMem(2 * 1024 * 1024);
  // 11MB total, 4MB mutable. The hard limit is still hit, but no flush is
  // signaled because more than half of the data is already being flushed.
  ASSERT_FALSE(wbf->ShouldFlush());
  wbf->ReserveMem(4 * 1024 * 1024);
  // 15MB total, 8MB mutable.
  ASSERT_TRUE(wbf->ShouldFlush());

  wbf->FreeMem(7 * 1024 * 1024);
  // 8MB total, 8MB mutable.
  ASSERT_FALSE(wbf->ShouldFlush());

  // Change the size: 8MB limit, 7MB mutable limit
  wbf->SetBufferSize(8 * 1024 * 1024);
  // 8MB total, 8MB mutable.
  ASSERT_TRUE(wbf->ShouldFlush());

  wbf->ScheduleFreeMem(2 * 1024 * 1024);
  // 8MB total, 6MB mutable.
  ASSERT_TRUE(wbf->ShouldFlush());

  wbf->FreeMem(2 * 1024 * 1024);
  // 6MB total, 6MB mutable.
  ASSERT_FALSE(wbf->ShouldFlush());

  wbf->ReserveMem(1 * 1024 * 1024);
  // 7MB total, 7MB mutable.
  ASSERT_FALSE(wbf->ShouldFlush());

  wbf->ReserveMem(1 * 1024 * 1024);
  // 8MB total, 8MB mutable.
  ASSERT_TRUE(wbf->ShouldFlush());

  wbf->ScheduleFreeMem(1 * 1024 * 1024);
  wbf->FreeMem(1 * 1024 * 1024);
  // 7MB total, 7MB mutable.
  ASSERT_FALSE(wbf->ShouldFlush());
}
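
// A rough summary of the behavior exercised above (inferred from the
// assertions and comments in this test, not an authoritative spec):
// ShouldFlush() signals a flush when mutable (not yet scheduled for freeing)
// memory exceeds the mutable limit (7MB for an 8MB buffer, per the comment
// above), or when total memory exceeds the buffer size while at least half of
// it is still mutable. Scheduling memory for freeing counts it as immutable
// and can therefore clear the condition.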

class ChargeWriteBufferTest : public testing::Test {};

TEST_F(ChargeWriteBufferTest, Basic) {
  constexpr std::size_t kMetaDataChargeOverhead = 10000;

  LRUCacheOptions co;
  // 1GB cache
  co.capacity = 1024 * 1024 * 1024;
  co.num_shard_bits = 4;
  co.metadata_charge_policy = kDontChargeCacheMetadata;
  std::shared_ptr<Cache> cache = NewLRUCache(co);
  // A write buffer manager of size 50MB
  std::unique_ptr<WriteBufferManager> wbf(
      new WriteBufferManager(50 * 1024 * 1024, cache));
  // Reserving 333KB charges 512KB to the cache; memory_used_ = 333KB
  wbf->ReserveMem(333 * 1024);
  // 2 dummy entries are added for 333KB
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 2 * kSizeDummyEntry);
  ASSERT_GE(cache->GetPinnedUsage(), 2 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 2 * 256 * 1024 + kMetaDataChargeOverhead);

  // Allocate another 512KB, memory_used_ = 845KB
  wbf->ReserveMem(512 * 1024);
  // 2 more dummy entries are added for 512KB since
  // ceil((memory_used_ - dummy_entries_in_cache_usage) / kSizeDummyEntry) = 2
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 4 * kSizeDummyEntry);
  ASSERT_GE(cache->GetPinnedUsage(), 4 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 4 * 256 * 1024 + kMetaDataChargeOverhead);

  // Allocate another 10MB, memory_used_ = 11085KB
  wbf->ReserveMem(10 * 1024 * 1024);
  // 40 more dummy entries are added for the 10MB (10 * 1024 * 1024 bytes)
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 44 * kSizeDummyEntry);
  ASSERT_GE(cache->GetPinnedUsage(), 44 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 44 * 256 * 1024 + kMetaDataChargeOverhead);
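  // Worked out from the formula above: memory_used_ = 11085KB and
  // dummy_entries_in_cache_usage = 4 * 256KB = 1024KB, so
  // ceil((11085KB - 1024KB) / 256KB) = ceil(39.3) = 40 more entries,
  // for 44 entries in total.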

  // Free 1MB, memory_used_ = 10061KB
  // It will not cause any change in cache cost
  // since memory_used_ > dummy_entries_in_cache_usage * (3/4)
  wbf->FreeMem(1 * 1024 * 1024);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 44 * kSizeDummyEntry);
  ASSERT_GE(cache->GetPinnedUsage(), 44 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 44 * 256 * 1024 + kMetaDataChargeOverhead);
  ASSERT_FALSE(wbf->ShouldFlush());

  // Allocate another 41MB, memory_used_ = 52045KB
  wbf->ReserveMem(41 * 1024 * 1024);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 204 * kSizeDummyEntry);
  ASSERT_GE(cache->GetPinnedUsage(), 204 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(),
            204 * 256 * 1024 + kMetaDataChargeOverhead);
  ASSERT_TRUE(wbf->ShouldFlush());
  ASSERT_TRUE(wbf->ShouldFlush());

  // Schedule a free of 20MB, memory_used_ = 52045KB
  // It will not cause any change in memory_used_ or cache cost
  wbf->ScheduleFreeMem(20 * 1024 * 1024);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 204 * kSizeDummyEntry);
  ASSERT_GE(cache->GetPinnedUsage(), 204 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(),
            204 * 256 * 1024 + kMetaDataChargeOverhead);
  // A flush is still needed since the hard limit is hit
  ASSERT_TRUE(wbf->ShouldFlush());

  // Free 20MB, memory_used_ = 31565KB
  // It will release 80 dummy entries from the cache since
  // memory_used_ < dummy_entries_in_cache_usage * (3/4) and
  // floor((dummy_entries_in_cache_usage - memory_used_) / kSizeDummyEntry)
  // = 80
  wbf->FreeMem(20 * 1024 * 1024);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 124 * kSizeDummyEntry);
  ASSERT_GE(cache->GetPinnedUsage(), 124 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(),
            124 * 256 * 1024 + kMetaDataChargeOverhead);
  ASSERT_FALSE(wbf->ShouldFlush());
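  // Worked out: dummy_entries_in_cache_usage = 204 * 256KB = 52224KB, and
  // 31565KB < 52224KB * 3/4 = 39168KB, so
  // floor((52224KB - 31565KB) / 256KB) = 80 entries are released,
  // leaving 204 - 80 = 124.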

  // Free 16KB, memory_used_ = 31549KB
  // It will not release any dummy entry since memory_used_ >=
  // dummy_entries_in_cache_usage * (3/4)
  wbf->FreeMem(16 * 1024);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 124 * kSizeDummyEntry);
  ASSERT_GE(cache->GetPinnedUsage(), 124 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(),
            124 * 256 * 1024 + kMetaDataChargeOverhead);

  // Free 20MB, memory_used_ = 11069KB
  // It will release 80 dummy entries from the cache since
  // memory_used_ < dummy_entries_in_cache_usage * (3/4) and
  // floor((dummy_entries_in_cache_usage - memory_used_) / kSizeDummyEntry)
  // = 80
  wbf->FreeMem(20 * 1024 * 1024);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 44 * kSizeDummyEntry);
  ASSERT_GE(cache->GetPinnedUsage(), 44 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 44 * 256 * 1024 + kMetaDataChargeOverhead);

  // Free 1MB, memory_used_ = 10045KB
  // It will not cause any change in cache cost
  // since memory_used_ > dummy_entries_in_cache_usage * (3/4)
  wbf->FreeMem(1 * 1024 * 1024);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 44 * kSizeDummyEntry);
  ASSERT_GE(cache->GetPinnedUsage(), 44 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 44 * 256 * 1024 + kMetaDataChargeOverhead);

  // Reserve 512KB, memory_used_ = 10557KB
  // It will not cause any change in cache cost
  // since memory_used_ > dummy_entries_in_cache_usage * (3/4),
  // which reflects the benefit of the delayed decrease: no dummy entry
  // needs to be re-inserted for this memory reservation.
  wbf->ReserveMem(512 * 1024);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 44 * kSizeDummyEntry);
  ASSERT_GE(cache->GetPinnedUsage(), 44 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 44 * 256 * 1024 + kMetaDataChargeOverhead);

  // Destroying the write buffer manager should free everything
  wbf.reset();
  ASSERT_EQ(cache->GetPinnedUsage(), 0);
}
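
// A minimal usage sketch (not part of the test, and assuming the standard
// DBOptions::write_buffer_manager field): a WriteBufferManager that charges a
// block cache, as exercised above, is typically shared across DBs or column
// families, e.g.
//
//   std::shared_ptr<Cache> cache = NewLRUCache(1024 * 1024 * 1024);
//   auto wbm = std::make_shared<WriteBufferManager>(50 * 1024 * 1024, cache);
//   Options options;
//   options.write_buffer_manager = wbm;
//   // Open one or more DBs with `options` so their memtable memory is
//   // tracked (and charged to `cache`) by the shared manager.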

TEST_F(ChargeWriteBufferTest, BasicWithNoBufferSizeLimit) {
  constexpr std::size_t kMetaDataChargeOverhead = 10000;
  // 1GB cache
  std::shared_ptr<Cache> cache = NewLRUCache(1024 * 1024 * 1024, 4);
  // A write buffer manager with no buffer size limit (size 0)
  std::unique_ptr<WriteBufferManager> wbf(new WriteBufferManager(0, cache));

  // Allocate 10MB, memory_used_ = 10240KB
  // It will add 40 dummy entries
  wbf->ReserveMem(10 * 1024 * 1024);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 40 * kSizeDummyEntry);
  ASSERT_GE(cache->GetPinnedUsage(), 40 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 40 * 256 * 1024 + kMetaDataChargeOverhead);
  ASSERT_FALSE(wbf->ShouldFlush());

  // Free 9MB, memory_used_ = 1024KB
  // It will free 36 dummy entries
  wbf->FreeMem(9 * 1024 * 1024);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 4 * kSizeDummyEntry);
  ASSERT_GE(cache->GetPinnedUsage(), 4 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 4 * 256 * 1024 + kMetaDataChargeOverhead);

  // Free 160KB gradually, memory_used_ = 864KB
  // It will not cause any change
  // since memory_used_ > dummy_entries_in_cache_usage * 3/4
  for (int i = 0; i < 40; i++) {
    wbf->FreeMem(4 * 1024);
  }
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 4 * kSizeDummyEntry);
  ASSERT_GE(cache->GetPinnedUsage(), 4 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 4 * 256 * 1024 + kMetaDataChargeOverhead);
}
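
// Worked out for the 9MB free above: dummy_entries_in_cache_usage was
// 40 * 256KB = 10240KB and memory_used_ dropped to 1024KB, which is below
// 10240KB * 3/4 = 7680KB, so floor((10240KB - 1024KB) / 256KB) = 36 entries
// are freed, leaving 4.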

TEST_F(ChargeWriteBufferTest, BasicWithCacheFull) {
  constexpr std::size_t kMetaDataChargeOverhead = 20000;

  // 12MB cache size with strict capacity
  LRUCacheOptions lo;
  lo.capacity = 12 * 1024 * 1024;
  lo.num_shard_bits = 0;
  lo.strict_capacity_limit = true;
  std::shared_ptr<Cache> cache = NewLRUCache(lo);
  std::unique_ptr<WriteBufferManager> wbf(new WriteBufferManager(0, cache));

  // Allocate 10MB, memory_used_ = 10240KB
  wbf->ReserveMem(10 * 1024 * 1024);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 40 * kSizeDummyEntry);
  ASSERT_GE(cache->GetPinnedUsage(), 40 * kSizeDummyEntry);
  ASSERT_LT(cache->GetPinnedUsage(),
            40 * kSizeDummyEntry + kMetaDataChargeOverhead);

  // Allocate another 10MB, memory_used_ = 20480KB
  // Some dummy entry insertions will fail because the cache is full
  wbf->ReserveMem(10 * 1024 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 40 * kSizeDummyEntry);
  ASSERT_LE(cache->GetPinnedUsage(), 12 * 1024 * 1024);
  ASSERT_LT(wbf->dummy_entries_in_cache_usage(), 80 * kSizeDummyEntry);

  // Free 15MB after encountering the full cache, memory_used_ = 5120KB
  wbf->FreeMem(15 * 1024 * 1024);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 20 * kSizeDummyEntry);
  ASSERT_GE(cache->GetPinnedUsage(), 20 * kSizeDummyEntry);
  ASSERT_LT(cache->GetPinnedUsage(),
            20 * kSizeDummyEntry + kMetaDataChargeOverhead);

  // Reserve 15MB, filling the cache again, memory_used_ = 20480KB
  wbf->ReserveMem(15 * 1024 * 1024);
  ASSERT_LE(cache->GetPinnedUsage(), 12 * 1024 * 1024);
  ASSERT_LT(wbf->dummy_entries_in_cache_usage(), 80 * kSizeDummyEntry);

  // Increase the capacity so the next insert will fully succeed
  cache->SetCapacity(40 * 1024 * 1024);

  // Allocate 10MB, memory_used_ = 30720KB
  wbf->ReserveMem(10 * 1024 * 1024);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 120 * kSizeDummyEntry);
  ASSERT_GE(cache->GetPinnedUsage(), 120 * kSizeDummyEntry);
  ASSERT_LT(cache->GetPinnedUsage(),
            120 * kSizeDummyEntry + kMetaDataChargeOverhead);

  // Gradually release 20MB
  // It ends up sequentially releasing 32, 24, and 18 dummy entries as
  // memory_used_ decreases to 22528KB, 16384KB, and 11776KB.
  // In total, it releases 74 dummy entries.
  for (int i = 0; i < 40; i++) {
    wbf->FreeMem(512 * 1024);
  }
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 46 * kSizeDummyEntry);
  ASSERT_GE(cache->GetPinnedUsage(), 46 * kSizeDummyEntry);
  ASSERT_LT(cache->GetPinnedUsage(),
            46 * kSizeDummyEntry + kMetaDataChargeOverhead);
}
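
// Worked out for the loop above (using the 3/4 rule stated earlier):
// starting from 120 entries (30720KB charged), the first release happens once
// memory_used_ falls below 30720KB * 3/4 = 23040KB, i.e. at 22528KB, freeing
// floor((30720KB - 22528KB) / 256KB) = 32 entries; the next two releases, at
// 16384KB and 11776KB, free 24 and 18 entries respectively, leaving
// 120 - 74 = 46 entries.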

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}