// memory_test.cc
  1. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
  2. // This source code is licensed under both the GPLv2 (found in the
  3. // COPYING file in the root directory) and Apache 2.0 License
  4. // (found in the LICENSE.Apache file in the root directory).
  5. #include "db/db_impl/db_impl.h"
  6. #include "rocksdb/cache.h"
  7. #include "rocksdb/table.h"
  8. #include "rocksdb/utilities/memory_util.h"
  9. #include "rocksdb/utilities/stackable_db.h"
  10. #include "table/block_based/block_based_table_factory.h"
  11. #include "test_util/testharness.h"
  12. #include "test_util/testutil.h"
  13. #include "util/random.h"
  14. #include "util/string_util.h"
  15. namespace ROCKSDB_NAMESPACE {
  16. class MemoryTest : public testing::Test {
  17. public:
  18. MemoryTest() : kDbDir(test::PerThreadDBPath("memory_test")), rnd_(301) {
  19. assert(Env::Default()->CreateDirIfMissing(kDbDir).ok());
  20. }
  21. std::string GetDBName(int id) { return kDbDir + "db_" + std::to_string(id); }
  22. void UpdateUsagesHistory(const std::vector<DB*>& dbs) {
  23. std::map<MemoryUtil::UsageType, uint64_t> usage_by_type;
  24. ASSERT_OK(GetApproximateMemoryUsageByType(dbs, &usage_by_type));
  25. for (int i = 0; i < MemoryUtil::kNumUsageTypes; ++i) {
  26. usage_history_[i].push_back(
  27. usage_by_type[static_cast<MemoryUtil::UsageType>(i)]);
  28. }
  29. }
  30. void GetCachePointers(const std::vector<DB*>& dbs,
  31. std::unordered_set<const Cache*>* cache_set) {
  32. cache_set->clear();
  33. for (auto* db : dbs) {
  34. assert(db);
  35. // Cache from DBImpl
  36. StackableDB* sdb = dynamic_cast<StackableDB*>(db);
  37. DBImpl* db_impl = dynamic_cast<DBImpl*>(sdb ? sdb->GetBaseDB() : db);
  38. if (db_impl != nullptr) {
  39. cache_set->insert(db_impl->TEST_table_cache());
  40. }
  41. // Cache from DBOptions
  42. cache_set->insert(db->GetDBOptions().row_cache.get());
  43. // Cache from table factories
  44. if (db_impl != nullptr) {
  45. db_impl->TEST_GetAllBlockCaches(cache_set);
  46. }
  47. }
  48. }
  49. Status GetApproximateMemoryUsageByType(
  50. const std::vector<DB*>& dbs,
  51. std::map<MemoryUtil::UsageType, uint64_t>* usage_by_type) {
  52. std::unordered_set<const Cache*> cache_set;
  53. GetCachePointers(dbs, &cache_set);
  54. return MemoryUtil::GetApproximateMemoryUsageByType(dbs, cache_set,
  55. usage_by_type);
  56. }
  57. const std::string kDbDir;
  58. Random rnd_;
  59. std::vector<uint64_t> usage_history_[MemoryUtil::kNumUsageTypes];
  60. };
  61. TEST_F(MemoryTest, SharedBlockCacheTotal) {
  62. std::vector<DB*> dbs;
  63. std::vector<uint64_t> usage_by_type;
  64. const int kNumDBs = 10;
  65. const int kKeySize = 100;
  66. const int kValueSize = 500;
  67. Options opt;
  68. opt.create_if_missing = true;
  69. opt.write_buffer_size = kKeySize + kValueSize;
  70. opt.max_write_buffer_number = 10;
  71. opt.min_write_buffer_number_to_merge = 10;
  72. opt.disable_auto_compactions = true;
  73. BlockBasedTableOptions bbt_opts;
  74. bbt_opts.block_cache = NewLRUCache(4096 * 1000 * 10);
  75. for (int i = 0; i < kNumDBs; ++i) {
  76. ASSERT_OK(DestroyDB(GetDBName(i), opt));
  77. DB* db = nullptr;
  78. ASSERT_OK(DB::Open(opt, GetDBName(i), &db));
  79. dbs.push_back(db);
  80. }
  81. std::vector<std::string> keys_by_db[kNumDBs];
  82. // Fill one memtable per Put to make memtable use more memory.
  83. for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
  84. for (int i = 0; i < kNumDBs; ++i) {
  85. for (int j = 0; j < 100; ++j) {
  86. keys_by_db[i].emplace_back(rnd_.RandomString(kKeySize));
  87. ASSERT_OK(dbs[i]->Put(WriteOptions(), keys_by_db[i].back(),
  88. rnd_.RandomString(kValueSize)));
  89. }
  90. ASSERT_OK(dbs[i]->Flush(FlushOptions()));
  91. }
  92. }
  93. for (int i = 0; i < kNumDBs; ++i) {
  94. for (auto& key : keys_by_db[i]) {
  95. std::string value;
  96. ASSERT_OK(dbs[i]->Get(ReadOptions(), key, &value));
  97. }
  98. UpdateUsagesHistory(dbs);
  99. }
  100. for (size_t i = 1; i < usage_history_[MemoryUtil::kMemTableTotal].size();
  101. ++i) {
  102. // Expect EQ as we didn't flush more memtables.
  103. ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
  104. usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  105. }
  106. for (int i = 0; i < kNumDBs; ++i) {
  107. delete dbs[i];
  108. }
  109. }
  110. TEST_F(MemoryTest, MemTableAndTableReadersTotal) {
  111. std::vector<DB*> dbs;
  112. std::vector<uint64_t> usage_by_type;
  113. std::vector<std::vector<ColumnFamilyHandle*>> vec_handles;
  114. const int kNumDBs = 10;
  115. // These key/value sizes ensure each KV has its own memtable. Note that the
  116. // minimum write_buffer_size allowed is 64 KB.
  117. const int kKeySize = 100;
  118. const int kValueSize = 1 << 16;
  119. Options opt;
  120. opt.create_if_missing = true;
  121. opt.create_missing_column_families = true;
  122. opt.write_buffer_size = kKeySize + kValueSize;
  123. opt.max_write_buffer_number = 10;
  124. opt.min_write_buffer_number_to_merge = 10;
  125. opt.disable_auto_compactions = true;
  126. std::vector<ColumnFamilyDescriptor> cf_descs = {
  127. {kDefaultColumnFamilyName, ColumnFamilyOptions(opt)},
  128. {"one", ColumnFamilyOptions(opt)},
  129. {"two", ColumnFamilyOptions(opt)},
  130. };
  131. for (int i = 0; i < kNumDBs; ++i) {
  132. ASSERT_OK(DestroyDB(GetDBName(i), opt));
  133. std::vector<ColumnFamilyHandle*> handles;
  134. dbs.emplace_back();
  135. vec_handles.emplace_back();
  136. ASSERT_OK(DB::Open(DBOptions(opt), GetDBName(i), cf_descs,
  137. &vec_handles.back(), &dbs.back()));
  138. }
  139. // Fill one memtable per Put to make memtable use more memory.
  140. for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
  141. for (int i = 0; i < kNumDBs; ++i) {
  142. for (auto* handle : vec_handles[i]) {
  143. ASSERT_OK(dbs[i]->Put(WriteOptions(), handle,
  144. rnd_.RandomString(kKeySize),
  145. rnd_.RandomString(kValueSize)));
  146. UpdateUsagesHistory(dbs);
  147. }
  148. }
  149. }
  150. // Expect the usage history is monotonically increasing
  151. for (size_t i = 1; i < usage_history_[MemoryUtil::kMemTableTotal].size();
  152. ++i) {
  153. ASSERT_GT(usage_history_[MemoryUtil::kMemTableTotal][i],
  154. usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
  155. ASSERT_GT(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
  156. usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
  157. ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
  158. usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  159. }
  160. size_t usage_check_point = usage_history_[MemoryUtil::kMemTableTotal].size();
  161. std::vector<Iterator*> iters;
  162. // Create an iterator and flush all memtables for each db
  163. for (int i = 0; i < kNumDBs; ++i) {
  164. iters.push_back(dbs[i]->NewIterator(ReadOptions()));
  165. ASSERT_OK(dbs[i]->Flush(FlushOptions()));
  166. for (int j = 0; j < 100; ++j) {
  167. std::string value;
  168. ASSERT_NOK(
  169. dbs[i]->Get(ReadOptions(), rnd_.RandomString(kKeySize), &value));
  170. }
  171. UpdateUsagesHistory(dbs);
  172. }
  173. for (size_t i = usage_check_point;
  174. i < usage_history_[MemoryUtil::kMemTableTotal].size(); ++i) {
  175. // Since memtables are pinned by iterators, we don't expect the
  176. // memory usage of all the memtables decreases as they are pinned
  177. // by iterators.
  178. ASSERT_GE(usage_history_[MemoryUtil::kMemTableTotal][i],
  179. usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
  180. // Expect the usage history from the "usage_decay_point" is
  181. // monotonically decreasing.
  182. ASSERT_LT(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
  183. usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
  184. // Expect the usage history of the table readers increases
  185. // as we flush tables.
  186. ASSERT_GT(usage_history_[MemoryUtil::kTableReadersTotal][i],
  187. usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  188. ASSERT_GT(usage_history_[MemoryUtil::kCacheTotal][i],
  189. usage_history_[MemoryUtil::kCacheTotal][i - 1]);
  190. }
  191. usage_check_point = usage_history_[MemoryUtil::kMemTableTotal].size();
  192. for (int i = 0; i < kNumDBs; ++i) {
  193. // iterator is not used.
  194. ASSERT_OK(iters[i]->status());
  195. delete iters[i];
  196. UpdateUsagesHistory(dbs);
  197. }
  198. for (size_t i = usage_check_point;
  199. i < usage_history_[MemoryUtil::kMemTableTotal].size(); ++i) {
  200. // Expect the usage of all memtables decreasing as we delete iterators.
  201. ASSERT_LT(usage_history_[MemoryUtil::kMemTableTotal][i],
  202. usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
  203. // Since the memory usage of un-flushed memtables is only affected
  204. // by Put and flush, we expect EQ here as we only delete iterators.
  205. ASSERT_EQ(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
  206. usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
  207. // Expect EQ as we didn't flush more memtables.
  208. ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
  209. usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  210. }
  211. for (int i = 0; i < kNumDBs; ++i) {
  212. for (auto* handle : vec_handles[i]) {
  213. delete handle;
  214. }
  215. delete dbs[i];
  216. }
  217. }
  218. } // namespace ROCKSDB_NAMESPACE
  219. int main(int argc, char** argv) {
  220. #if !(defined NDEBUG) || !defined(OS_WIN)
  221. ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  222. ::testing::InitGoogleTest(&argc, argv);
  223. return RUN_ALL_TESTS();
  224. #else
  225. return 0;
  226. #endif
  227. }