// memory_test.cc
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#ifndef ROCKSDB_LITE

#include "db/db_impl/db_impl.h"
#include "rocksdb/cache.h"
#include "rocksdb/table.h"
#include "rocksdb/utilities/memory_util.h"
#include "rocksdb/utilities/stackable_db.h"
#include "table/block_based/block_based_table_factory.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/string_util.h"
namespace ROCKSDB_NAMESPACE {
  16. class MemoryTest : public testing::Test {
  17. public:
  18. MemoryTest() : kDbDir(test::PerThreadDBPath("memory_test")), rnd_(301) {
  19. assert(Env::Default()->CreateDirIfMissing(kDbDir).ok());
  20. }
  21. std::string GetDBName(int id) { return kDbDir + "db_" + ToString(id); }
  22. std::string RandomString(int len) {
  23. std::string r;
  24. test::RandomString(&rnd_, len, &r);
  25. return r;
  26. }
  27. void UpdateUsagesHistory(const std::vector<DB*>& dbs) {
  28. std::map<MemoryUtil::UsageType, uint64_t> usage_by_type;
  29. ASSERT_OK(GetApproximateMemoryUsageByType(dbs, &usage_by_type));
  30. for (int i = 0; i < MemoryUtil::kNumUsageTypes; ++i) {
  31. usage_history_[i].push_back(
  32. usage_by_type[static_cast<MemoryUtil::UsageType>(i)]);
  33. }
  34. }
  35. void GetCachePointersFromTableFactory(
  36. const TableFactory* factory,
  37. std::unordered_set<const Cache*>* cache_set) {
  38. const BlockBasedTableFactory* bbtf =
  39. dynamic_cast<const BlockBasedTableFactory*>(factory);
  40. if (bbtf != nullptr) {
  41. const auto bbt_opts = bbtf->table_options();
  42. cache_set->insert(bbt_opts.block_cache.get());
  43. cache_set->insert(bbt_opts.block_cache_compressed.get());
  44. }
  45. }
  46. void GetCachePointers(const std::vector<DB*>& dbs,
  47. std::unordered_set<const Cache*>* cache_set) {
  48. cache_set->clear();
  49. for (auto* db : dbs) {
  50. assert(db);
  51. // Cache from DBImpl
  52. StackableDB* sdb = dynamic_cast<StackableDB*>(db);
  53. DBImpl* db_impl = dynamic_cast<DBImpl*>(sdb ? sdb->GetBaseDB() : db);
  54. if (db_impl != nullptr) {
  55. cache_set->insert(db_impl->TEST_table_cache());
  56. }
  57. // Cache from DBOptions
  58. cache_set->insert(db->GetDBOptions().row_cache.get());
  59. // Cache from table factories
  60. std::unordered_map<std::string, const ImmutableCFOptions*> iopts_map;
  61. if (db_impl != nullptr) {
  62. ASSERT_OK(db_impl->TEST_GetAllImmutableCFOptions(&iopts_map));
  63. }
  64. for (auto pair : iopts_map) {
  65. GetCachePointersFromTableFactory(pair.second->table_factory, cache_set);
  66. }
  67. }
  68. }
  69. Status GetApproximateMemoryUsageByType(
  70. const std::vector<DB*>& dbs,
  71. std::map<MemoryUtil::UsageType, uint64_t>* usage_by_type) {
  72. std::unordered_set<const Cache*> cache_set;
  73. GetCachePointers(dbs, &cache_set);
  74. return MemoryUtil::GetApproximateMemoryUsageByType(dbs, cache_set,
  75. usage_by_type);
  76. }
  77. const std::string kDbDir;
  78. Random rnd_;
  79. std::vector<uint64_t> usage_history_[MemoryUtil::kNumUsageTypes];
  80. };
  81. TEST_F(MemoryTest, SharedBlockCacheTotal) {
  82. std::vector<DB*> dbs;
  83. std::vector<uint64_t> usage_by_type;
  84. const int kNumDBs = 10;
  85. const int kKeySize = 100;
  86. const int kValueSize = 500;
  87. Options opt;
  88. opt.create_if_missing = true;
  89. opt.write_buffer_size = kKeySize + kValueSize;
  90. opt.max_write_buffer_number = 10;
  91. opt.min_write_buffer_number_to_merge = 10;
  92. opt.disable_auto_compactions = true;
  93. BlockBasedTableOptions bbt_opts;
  94. bbt_opts.block_cache = NewLRUCache(4096 * 1000 * 10);
  95. for (int i = 0; i < kNumDBs; ++i) {
  96. DestroyDB(GetDBName(i), opt);
  97. DB* db = nullptr;
  98. ASSERT_OK(DB::Open(opt, GetDBName(i), &db));
  99. dbs.push_back(db);
  100. }
  101. std::vector<std::string> keys_by_db[kNumDBs];
  102. // Fill one memtable per Put to make memtable use more memory.
  103. for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
  104. for (int i = 0; i < kNumDBs; ++i) {
  105. for (int j = 0; j < 100; ++j) {
  106. keys_by_db[i].emplace_back(RandomString(kKeySize));
  107. dbs[i]->Put(WriteOptions(), keys_by_db[i].back(),
  108. RandomString(kValueSize));
  109. }
  110. dbs[i]->Flush(FlushOptions());
  111. }
  112. }
  113. for (int i = 0; i < kNumDBs; ++i) {
  114. for (auto& key : keys_by_db[i]) {
  115. std::string value;
  116. dbs[i]->Get(ReadOptions(), key, &value);
  117. }
  118. UpdateUsagesHistory(dbs);
  119. }
  120. for (size_t i = 1; i < usage_history_[MemoryUtil::kMemTableTotal].size();
  121. ++i) {
  122. // Expect EQ as we didn't flush more memtables.
  123. ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
  124. usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  125. }
  126. for (int i = 0; i < kNumDBs; ++i) {
  127. delete dbs[i];
  128. }
  129. }
  130. TEST_F(MemoryTest, MemTableAndTableReadersTotal) {
  131. std::vector<DB*> dbs;
  132. std::vector<uint64_t> usage_by_type;
  133. std::vector<std::vector<ColumnFamilyHandle*>> vec_handles;
  134. const int kNumDBs = 10;
  135. const int kKeySize = 100;
  136. const int kValueSize = 500;
  137. Options opt;
  138. opt.create_if_missing = true;
  139. opt.create_missing_column_families = true;
  140. opt.write_buffer_size = kKeySize + kValueSize;
  141. opt.max_write_buffer_number = 10;
  142. opt.min_write_buffer_number_to_merge = 10;
  143. opt.disable_auto_compactions = true;
  144. std::vector<ColumnFamilyDescriptor> cf_descs = {
  145. {kDefaultColumnFamilyName, ColumnFamilyOptions(opt)},
  146. {"one", ColumnFamilyOptions(opt)},
  147. {"two", ColumnFamilyOptions(opt)},
  148. };
  149. for (int i = 0; i < kNumDBs; ++i) {
  150. DestroyDB(GetDBName(i), opt);
  151. std::vector<ColumnFamilyHandle*> handles;
  152. dbs.emplace_back();
  153. vec_handles.emplace_back();
  154. ASSERT_OK(DB::Open(DBOptions(opt), GetDBName(i), cf_descs,
  155. &vec_handles.back(), &dbs.back()));
  156. }
  157. // Fill one memtable per Put to make memtable use more memory.
  158. for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
  159. for (int i = 0; i < kNumDBs; ++i) {
  160. for (auto* handle : vec_handles[i]) {
  161. dbs[i]->Put(WriteOptions(), handle, RandomString(kKeySize),
  162. RandomString(kValueSize));
  163. UpdateUsagesHistory(dbs);
  164. }
  165. }
  166. }
  167. // Expect the usage history is monotonically increasing
  168. for (size_t i = 1; i < usage_history_[MemoryUtil::kMemTableTotal].size();
  169. ++i) {
  170. ASSERT_GT(usage_history_[MemoryUtil::kMemTableTotal][i],
  171. usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
  172. ASSERT_GT(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
  173. usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
  174. ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
  175. usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  176. }
  177. size_t usage_check_point = usage_history_[MemoryUtil::kMemTableTotal].size();
  178. std::vector<Iterator*> iters;
  179. // Create an iterator and flush all memtables for each db
  180. for (int i = 0; i < kNumDBs; ++i) {
  181. iters.push_back(dbs[i]->NewIterator(ReadOptions()));
  182. dbs[i]->Flush(FlushOptions());
  183. for (int j = 0; j < 100; ++j) {
  184. std::string value;
  185. dbs[i]->Get(ReadOptions(), RandomString(kKeySize), &value);
  186. }
  187. UpdateUsagesHistory(dbs);
  188. }
  189. for (size_t i = usage_check_point;
  190. i < usage_history_[MemoryUtil::kMemTableTotal].size(); ++i) {
  191. // Since memtables are pinned by iterators, we don't expect the
  192. // memory usage of all the memtables decreases as they are pinned
  193. // by iterators.
  194. ASSERT_GE(usage_history_[MemoryUtil::kMemTableTotal][i],
  195. usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
  196. // Expect the usage history from the "usage_decay_point" is
  197. // monotonically decreasing.
  198. ASSERT_LT(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
  199. usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
  200. // Expect the usage history of the table readers increases
  201. // as we flush tables.
  202. ASSERT_GT(usage_history_[MemoryUtil::kTableReadersTotal][i],
  203. usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  204. ASSERT_GT(usage_history_[MemoryUtil::kCacheTotal][i],
  205. usage_history_[MemoryUtil::kCacheTotal][i - 1]);
  206. }
  207. usage_check_point = usage_history_[MemoryUtil::kMemTableTotal].size();
  208. for (int i = 0; i < kNumDBs; ++i) {
  209. delete iters[i];
  210. UpdateUsagesHistory(dbs);
  211. }
  212. for (size_t i = usage_check_point;
  213. i < usage_history_[MemoryUtil::kMemTableTotal].size(); ++i) {
  214. // Expect the usage of all memtables decreasing as we delete iterators.
  215. ASSERT_LT(usage_history_[MemoryUtil::kMemTableTotal][i],
  216. usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
  217. // Since the memory usage of un-flushed memtables is only affected
  218. // by Put and flush, we expect EQ here as we only delete iterators.
  219. ASSERT_EQ(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
  220. usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
  221. // Expect EQ as we didn't flush more memtables.
  222. ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
  223. usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  224. }
  225. for (int i = 0; i < kNumDBs; ++i) {
  226. for (auto* handle : vec_handles[i]) {
  227. delete handle;
  228. }
  229. delete dbs[i];
  230. }
  231. }
}  // namespace ROCKSDB_NAMESPACE
  233. int main(int argc, char** argv) {
  234. #if !(defined NDEBUG) || !defined(OS_WIN)
  235. ::testing::InitGoogleTest(&argc, argv);
  236. return RUN_ALL_TESTS();
  237. #else
  238. return 0;
  239. #endif
  240. }
#else
#include <cstdio>
  243. int main(int /*argc*/, char** /*argv*/) {
  244. printf("Skipped in RocksDBLite as utilities are not supported.\n");
  245. return 0;
  246. }
#endif  // !ROCKSDB_LITE