// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "memory/arena.h"
#ifndef OS_WIN
#include <sys/mman.h>
#endif
#include <algorithm>

#include "logging/logging.h"
#include "port/malloc.h"
#include "port/port.h"
#include "rocksdb/env.h"
#include "test_util/sync_point.h"

namespace ROCKSDB_NAMESPACE {

// MSVC complains that it is already defined since it is static in the header.
#ifndef _MSC_VER
const size_t Arena::kInlineSize;
#endif

const size_t Arena::kMinBlockSize = 4096;
const size_t Arena::kMaxBlockSize = 2u << 30;
static const int kAlignUnit = alignof(max_align_t);

size_t OptimizeBlockSize(size_t block_size) {
  // Make sure block_size is in optimal range
  block_size = std::max(Arena::kMinBlockSize, block_size);
  block_size = std::min(Arena::kMaxBlockSize, block_size);
  // Make sure block_size is a multiple of kAlignUnit.
  if (block_size % kAlignUnit != 0) {
    block_size = (1 + block_size / kAlignUnit) * kAlignUnit;
  }
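
  // For example, with a 16-byte kAlignUnit (a common value of
  // alignof(max_align_t)), a requested block_size of 5000 rounds up to 5008.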
  return block_size;
}

Arena::Arena(size_t block_size, AllocTracker* tracker, size_t huge_page_size)
    : kBlockSize(OptimizeBlockSize(block_size)), tracker_(tracker) {
  assert(kBlockSize >= kMinBlockSize && kBlockSize <= kMaxBlockSize &&
         kBlockSize % kAlignUnit == 0);
  TEST_SYNC_POINT_CALLBACK("Arena::Arena:0", const_cast<size_t*>(&kBlockSize));
  alloc_bytes_remaining_ = sizeof(inline_block_);
  blocks_memory_ += alloc_bytes_remaining_;
  aligned_alloc_ptr_ = inline_block_;
  unaligned_alloc_ptr_ = inline_block_ + alloc_bytes_remaining_;
#ifdef MAP_HUGETLB
  hugetlb_size_ = huge_page_size;
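  // If one block does not fit in a single huge page, round hugetlb_size_ up
  // to the next multiple of the huge page size so one mapping can hold a full
  // block, e.g. a 5 MiB block with 2 MiB huge pages uses 6 MiB mappings.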
  if (hugetlb_size_ && kBlockSize > hugetlb_size_) {
    hugetlb_size_ = ((kBlockSize - 1U) / hugetlb_size_ + 1U) * hugetlb_size_;
  }
#else
  (void)huge_page_size;
#endif
  if (tracker_ != nullptr) {
    tracker_->Allocate(kInlineSize);
  }
}
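
// Illustrative usage (a sketch, not part of this translation unit): an Arena
// serves many small requests out of larger blocks and releases everything at
// once when it is destroyed, e.g.
//
//   Arena arena(Arena::kMinBlockSize);
//   char* raw = arena.Allocate(100);             // unaligned allocation
//   char* aligned = arena.AllocateAligned(256);  // aligned to kAlignUnit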

Arena::~Arena() {
  if (tracker_ != nullptr) {
    assert(tracker_->is_freed());
    tracker_->FreeMem();
  }
  for (const auto& block : blocks_) {
    delete[] block;
  }

#ifdef MAP_HUGETLB
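  // Unmap any huge-page allocations. Entries whose addr_ is null are
  // placeholders left behind by failed mmap calls and are skipped.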
  for (const auto& mmap_info : huge_blocks_) {
    if (mmap_info.addr_ == nullptr) {
      continue;
    }

    auto ret = munmap(mmap_info.addr_, mmap_info.length_);
    if (ret != 0) {
      // TODO(sdong): Better handling
    }
  }
#endif
}

char* Arena::AllocateFallback(size_t bytes, bool aligned) {
  if (bytes > kBlockSize / 4) {
    ++irregular_block_num;
    // Object is more than a quarter of our block size. Allocate it separately
    // to avoid wasting too much space in leftover bytes.
    return AllocateNewBlock(bytes);
  }

  // We waste the remaining space in the current block.
  size_t size = 0;
  char* block_head = nullptr;
#ifdef MAP_HUGETLB
  if (hugetlb_size_) {
    size = hugetlb_size_;
    block_head = AllocateFromHugePage(size);
  }
#endif
  if (!block_head) {
    size = kBlockSize;
    block_head = AllocateNewBlock(size);
  }
  alloc_bytes_remaining_ = size - bytes;
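
  // Serve aligned requests from the front of the new block (which new[] and
  // mmap return suitably aligned) and unaligned requests from the back, so
  // the two cursors can share a single block.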
  if (aligned) {
    aligned_alloc_ptr_ = block_head + bytes;
    unaligned_alloc_ptr_ = block_head + size;
    return block_head;
  } else {
    aligned_alloc_ptr_ = block_head;
    unaligned_alloc_ptr_ = block_head + size - bytes;
    return unaligned_alloc_ptr_;
  }
}

char* Arena::AllocateFromHugePage(size_t bytes) {
#ifdef MAP_HUGETLB
  if (hugetlb_size_ == 0) {
    return nullptr;
  }
  // Reserve space in `huge_blocks_` before calling `mmap`.
  // Use `emplace_back()` instead of `reserve()` to let std::vector manage its
  // own memory and do fewer reallocations.
  //
  // - If `emplace_back` throws, no memory leaks because we haven't called
  //   `mmap` yet.
  // - If `mmap` fails, no memory leaks because the vector will be cleaned up
  //   via RAII.
  huge_blocks_.emplace_back(nullptr /* addr */, 0 /* length */);

  void* addr = mmap(nullptr, bytes, (PROT_READ | PROT_WRITE),
                    (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB), -1, 0);
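
  // On failure the placeholder (nullptr, 0) entry stays in huge_blocks_;
  // ~Arena() skips entries whose addr_ is null.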
  if (addr == MAP_FAILED) {
    return nullptr;
  }
  huge_blocks_.back() = MmapInfo(addr, bytes);
  blocks_memory_ += bytes;
  if (tracker_ != nullptr) {
    tracker_->Allocate(bytes);
  }
  return reinterpret_cast<char*>(addr);
#else
  (void)bytes;
  return nullptr;
#endif
}

char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
                             Logger* logger) {
  assert((kAlignUnit & (kAlignUnit - 1)) ==
         0);  // kAlignUnit should be a power of 2

#ifdef MAP_HUGETLB
  if (huge_page_size > 0 && bytes > 0) {
    // Allocate from a huge page TLB table.
    assert(logger != nullptr);  // logger needs to be passed in.
    size_t reserved_size =
        ((bytes - 1U) / huge_page_size + 1U) * huge_page_size;
    assert(reserved_size >= bytes);
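
    // E.g. a 3 MiB request with a 2 MiB huge_page_size reserves two huge
    // pages (4 MiB).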
    char* addr = AllocateFromHugePage(reserved_size);
    if (addr == nullptr) {
      ROCKS_LOG_WARN(logger,
                     "AllocateAligned fail to allocate huge TLB pages: %s",
                     strerror(errno));
      // fall back to malloc
    } else {
      return addr;
    }
  }
#else
  (void)huge_page_size;
  (void)logger;
#endif
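
  // Round the aligned cursor up to the next kAlignUnit boundary. E.g. with a
  // 16-byte kAlignUnit and a cursor address ending in 0x...c, current_mod is
  // 12 and slop is 4.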
  size_t current_mod =
      reinterpret_cast<uintptr_t>(aligned_alloc_ptr_) & (kAlignUnit - 1);
  size_t slop = (current_mod == 0 ? 0 : kAlignUnit - current_mod);
  size_t needed = bytes + slop;
  char* result;
  if (needed <= alloc_bytes_remaining_) {
    result = aligned_alloc_ptr_ + slop;
    aligned_alloc_ptr_ += needed;
    alloc_bytes_remaining_ -= needed;
  } else {
    // AllocateFallback always returns aligned memory
    result = AllocateFallback(bytes, true /* aligned */);
  }
  assert((reinterpret_cast<uintptr_t>(result) & (kAlignUnit - 1)) == 0);
  return result;
}

char* Arena::AllocateNewBlock(size_t block_bytes) {
  // Reserve space in `blocks_` before allocating memory via new.
  // Use `emplace_back()` instead of `reserve()` to let std::vector manage its
  // own memory and do fewer reallocations.
  //
  // - If `emplace_back` throws, no memory leaks because we haven't called
  //   `new` yet.
  // - If `new` throws, no memory leaks because the vector will be cleaned up
  //   via RAII.
  blocks_.emplace_back(nullptr);

  char* block = new char[block_bytes];
  size_t allocated_size;
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
  allocated_size = malloc_usable_size(block);
#ifndef NDEBUG
  // It's hard to predict what malloc_usable_size() returns.
  // A callback can allow users to change the charged size.
  std::pair<size_t*, size_t*> pair(&allocated_size, &block_bytes);
  TEST_SYNC_POINT_CALLBACK("Arena::AllocateNewBlock:0", &pair);
#endif  // NDEBUG
#else
  allocated_size = block_bytes;
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
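
  // Charge what the allocator actually reserved, which can be slightly more
  // than block_bytes, or block_bytes itself when malloc_usable_size() is
  // unavailable.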
  blocks_memory_ += allocated_size;
  if (tracker_ != nullptr) {
    tracker_->Allocate(allocated_size);
  }
  blocks_.back() = block;
  return block;
}

}  // namespace ROCKSDB_NAMESPACE