arena_test.cc

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "memory/arena.h"

#ifndef OS_WIN
#include <sys/resource.h>
#endif

#include "port/jemalloc_helper.h"
#include "port/port.h"
#include "test_util/testharness.h"
#include "util/random.h"

namespace ROCKSDB_NAMESPACE {

namespace {
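// 2 MiB, the common x86-64 (transparent) huge page size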
const size_t kHugePageSize = 2 * 1024 * 1024;
}  // namespace

class ArenaTest : public testing::Test {};

TEST_F(ArenaTest, Empty) { Arena arena0; }

namespace {
bool CheckMemoryAllocated(size_t allocated, size_t expected) {
  // The value returned by Arena::MemoryAllocatedBytes() may be greater than
  // the requested memory. We choose a somewhat arbitrary upper bound of
  // max_expected = expected * 1.1 to detect critical overallocation.
  size_t max_expected = expected + expected / 10;
  return allocated >= expected && allocated <= max_expected;
}

void MemoryAllocatedBytesTest(size_t huge_page_size) {
  const int N = 17;
  size_t req_sz;           // requested size
  size_t bsz = 32 * 1024;  // block size
  size_t expected_memory_allocated;

  Arena arena(bsz, nullptr, huge_page_size);

  // requested size > quarter of a block:
  //   allocate requested size separately
  req_sz = 12 * 1024;
  for (int i = 0; i < N; i++) {
    arena.Allocate(req_sz);
  }
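  // Note that the arena's inline block is counted in MemoryAllocatedBytes()
  // as well, hence the kInlineSize term in the expected total.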
  expected_memory_allocated = req_sz * N + Arena::kInlineSize;
  ASSERT_PRED2(CheckMemoryAllocated, arena.MemoryAllocatedBytes(),
               expected_memory_allocated);
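
  // Use up most of the inline block so that the small allocations below will
  // need a fresh block.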
  arena.Allocate(Arena::kInlineSize - 1);

  // requested size < quarter of a block:
  //   allocate a block with the default size, then try to use the unused part
  //   of the block. So one new block will be allocated for the first
  //   Allocate(99) call. All the remaining calls won't lead to new allocation.
  req_sz = 99;
  for (int i = 0; i < N; i++) {
    arena.Allocate(req_sz);
  }
  if (huge_page_size) {
    ASSERT_TRUE(
        CheckMemoryAllocated(arena.MemoryAllocatedBytes(),
                             expected_memory_allocated + bsz) ||
        CheckMemoryAllocated(arena.MemoryAllocatedBytes(),
                             expected_memory_allocated + huge_page_size));
  } else {
    expected_memory_allocated += bsz;
    ASSERT_PRED2(CheckMemoryAllocated, arena.MemoryAllocatedBytes(),
                 expected_memory_allocated);
  }

  // requested size > size of a block:
  //   allocate requested size separately
  expected_memory_allocated = arena.MemoryAllocatedBytes();
  req_sz = 8 * 1024 * 1024;
  for (int i = 0; i < N; i++) {
    arena.Allocate(req_sz);
  }
  expected_memory_allocated += req_sz * N;
  ASSERT_PRED2(CheckMemoryAllocated, arena.MemoryAllocatedBytes(),
               expected_memory_allocated);
}

// Make sure we didn't count the allocated-but-unused memory space in
// Arena::ApproximateMemoryUsage()
static void ApproximateMemoryUsageTest(size_t huge_page_size) {
  const size_t kBlockSize = 4096;
  const size_t kEntrySize = kBlockSize / 8;
  const size_t kZero = 0;
  Arena arena(kBlockSize, nullptr, huge_page_size);
  ASSERT_EQ(kZero, arena.ApproximateMemoryUsage());

  // allocate inline bytes
  const size_t kAlignUnit = alignof(max_align_t);
  EXPECT_TRUE(arena.IsInInlineBlock());
  arena.AllocateAligned(kAlignUnit);
  EXPECT_TRUE(arena.IsInInlineBlock());
  arena.AllocateAligned(Arena::kInlineSize / 2 - (2 * kAlignUnit));
  EXPECT_TRUE(arena.IsInInlineBlock());
  arena.AllocateAligned(Arena::kInlineSize / 2);
  EXPECT_TRUE(arena.IsInInlineBlock());
  ASSERT_EQ(arena.ApproximateMemoryUsage(), Arena::kInlineSize - kAlignUnit);
  ASSERT_PRED2(CheckMemoryAllocated, arena.MemoryAllocatedBytes(),
               Arena::kInlineSize);
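
  // kEntrySize divides kBlockSize evenly, so num_blocks aligned allocations
  // of kEntrySize fill exactly one block.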
  auto num_blocks = kBlockSize / kEntrySize;

  // first allocation
  arena.AllocateAligned(kEntrySize);
  EXPECT_FALSE(arena.IsInInlineBlock());
  auto mem_usage = arena.MemoryAllocatedBytes();
  if (huge_page_size) {
    ASSERT_TRUE(
        CheckMemoryAllocated(mem_usage, kBlockSize + Arena::kInlineSize) ||
        CheckMemoryAllocated(mem_usage, huge_page_size + Arena::kInlineSize));
  } else {
    ASSERT_PRED2(CheckMemoryAllocated, mem_usage,
                 kBlockSize + Arena::kInlineSize);
  }
  auto usage = arena.ApproximateMemoryUsage();
  ASSERT_LT(usage, mem_usage);
  for (size_t i = 1; i < num_blocks; ++i) {
    arena.AllocateAligned(kEntrySize);
    ASSERT_EQ(mem_usage, arena.MemoryAllocatedBytes());
    ASSERT_EQ(arena.ApproximateMemoryUsage(), usage + kEntrySize);
    EXPECT_FALSE(arena.IsInInlineBlock());
    usage = arena.ApproximateMemoryUsage();
  }
  if (huge_page_size) {
    ASSERT_TRUE(usage > mem_usage ||
                usage + huge_page_size - kBlockSize == mem_usage);
  } else {
    ASSERT_GT(usage, mem_usage);
  }
}

static void SimpleTest(size_t huge_page_size) {
  std::vector<std::pair<size_t, char*>> allocated;
  Arena arena(Arena::kMinBlockSize, nullptr, huge_page_size);
  const int N = 100000;
  size_t bytes = 0;
  Random rnd(301);
  for (int i = 0; i < N; i++) {
    size_t s;
    if (i % (N / 10) == 0) {
      s = i;
    } else {
      s = rnd.OneIn(4000)
              ? rnd.Uniform(6000)
              : (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20));
    }
    if (s == 0) {
      // Our arena disallows size 0 allocations.
      s = 1;
    }
    char* r;
    if (rnd.OneIn(10)) {
      r = arena.AllocateAligned(s);
    } else {
      r = arena.Allocate(s);
    }

    for (unsigned int b = 0; b < s; b++) {
      // Fill the "i"th allocation with a known bit pattern
      r[b] = i % 256;
    }
    bytes += s;
    allocated.emplace_back(s, r);
    ASSERT_GE(arena.ApproximateMemoryUsage(), bytes);
    if (i > N / 10) {
      ASSERT_LE(arena.ApproximateMemoryUsage(), bytes * 1.10);
    }
  }
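
  // Verify that every allocation still holds its fill pattern, i.e. later
  // allocations did not overlap or corrupt earlier ones.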
  for (unsigned int i = 0; i < allocated.size(); i++) {
    size_t num_bytes = allocated[i].first;
    const char* p = allocated[i].second;
    for (unsigned int b = 0; b < num_bytes; b++) {
      // Check the "i"th allocation for the known bit pattern
      ASSERT_EQ(int(p[b]) & 0xff, (int)(i % 256));
    }
  }
}
}  // namespace

TEST_F(ArenaTest, MemoryAllocatedBytes) {
  MemoryAllocatedBytesTest(0);
  MemoryAllocatedBytesTest(kHugePageSize);
}

TEST_F(ArenaTest, ApproximateMemoryUsage) {
  ApproximateMemoryUsageTest(0);
  ApproximateMemoryUsageTest(kHugePageSize);
}

TEST_F(ArenaTest, Simple) {
  SimpleTest(0);
  SimpleTest(kHugePageSize);
}

// Number of minor page faults since last call
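// Used below to detect whether memory was freshly mapped: zero-fill-on-demand
// pages fault on first touch, while recycled, already-resident memory does not.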
size_t PopMinorPageFaultCount() {
#ifdef RUSAGE_SELF
  static long prev = 0;
  struct rusage usage;
  EXPECT_EQ(getrusage(RUSAGE_SELF, &usage), 0);
  size_t rv = usage.ru_minflt - prev;
  prev = usage.ru_minflt;
  return rv;
#else
  // Conservative
  return SIZE_MAX;
#endif  // RUSAGE_SELF
}

TEST(MmapTest, AllocateLazyZeroed) {
  // Doesn't have to be page aligned
  constexpr size_t len = 1234567;    // in bytes
  constexpr size_t count = len / 8;  // in uint64_t objects

  // Implicit conversion move
  TypedMemMapping<uint64_t> pre_arr = MemMapping::AllocateLazyZeroed(len);
  // Move from same type
  TypedMemMapping<uint64_t> arr = std::move(pre_arr);

  ASSERT_NE(arr.Get(), nullptr);
  ASSERT_EQ(arr.Get(), &arr[0]);
  ASSERT_EQ(arr.Get(), arr.MemMapping::Get());
  ASSERT_EQ(arr.Length(), len);
  ASSERT_EQ(arr.Count(), count);

  // Start counting page faults
  PopMinorPageFaultCount();

  // Access half of the allocation
  size_t i = 0;
  for (; i < count / 2; ++i) {
    ASSERT_EQ(arr[i], 0);
    arr[i] = i;
  }

  // Appropriate page faults (maybe more)
  size_t faults = PopMinorPageFaultCount();
  ASSERT_GE(faults, len / 2 / port::kPageSize);

  // Access rest of the allocation
  for (; i < count; ++i) {
    ASSERT_EQ(arr[i], 0);
    arr[i] = i;
  }

  // Appropriate page faults (maybe more)
  faults = PopMinorPageFaultCount();
  ASSERT_GE(faults, len / 2 / port::kPageSize);

  // Verify data
  for (i = 0; i < count; ++i) {
    ASSERT_EQ(arr[i], i);
  }
}

TEST_F(ArenaTest, UnmappedAllocation) {
  // Verify that it's possible to get unmapped pages in large allocations,
  // for memory efficiency and to ensure we don't accidentally waste time &
  // space initializing the memory.

#ifdef ROCKSDB_JEMALLOC
  // With Jemalloc config.fill, the pages are written to before we get them
  uint8_t fill = 0;
  size_t fill_sz = sizeof(fill);
  mallctl("config.fill", &fill, &fill_sz, nullptr, 0);
  if (fill) {
    ROCKSDB_GTEST_BYPASS("Test skipped because of config.fill==true");
    return;
  }
#endif  // ROCKSDB_JEMALLOC

  // This block size value is smaller than the smallest x86 huge page size,
  // so should not be fulfilled by a transparent huge page mapping.
  constexpr size_t kBlockSize = 1U << 20;
  Arena arena(kBlockSize);

  // The allocator might give us back recycled memory for a while, but
  // that shouldn't last forever.
  for (int i = 0;; ++i) {
    char* p = arena.Allocate(kBlockSize);

    // Start counting page faults
    PopMinorPageFaultCount();

    // Overwrite the whole allocation
    for (size_t j = 0; j < kBlockSize; ++j) {
      p[j] = static_cast<char>(j & 255);
    }

    size_t faults = PopMinorPageFaultCount();
    if (faults >= kBlockSize * 3 / 4 / port::kPageSize) {
      // Most of the accesses generated page faults => GOOD
      break;
    }
    // Should have succeeded after enough tries
    ASSERT_LT(i, 1000);
  }
}

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}