arena.cc

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "memory/arena.h"

#include <algorithm>

#include "logging/logging.h"
#include "port/malloc.h"
#include "port/port.h"
#include "rocksdb/env.h"
#include "test_util/sync_point.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

size_t Arena::OptimizeBlockSize(size_t block_size) {
  // Make sure block_size is in optimal range
  block_size = std::max(Arena::kMinBlockSize, block_size);
  block_size = std::min(Arena::kMaxBlockSize, block_size);

  // make sure block_size is the multiple of kAlignUnit
  if (block_size % kAlignUnit != 0) {
    block_size = (1 + block_size / kAlignUnit) * kAlignUnit;
  }

  return block_size;
}
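
// Worked example of the rounding above (a sketch; the concrete value of
// kAlignUnit comes from memory/arena.h, typically alignof(std::max_align_t)):
// assuming kAlignUnit == 16 and block_size == 4097, the remainder is nonzero,
// so block_size = (1 + 4097 / 16) * 16 = 257 * 16 = 4112, i.e. the size is
// rounded up to the next multiple of kAlignUnit.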

Arena::Arena(size_t block_size, AllocTracker* tracker, size_t huge_page_size)
    : kBlockSize(OptimizeBlockSize(block_size)), tracker_(tracker) {
  assert(kBlockSize >= kMinBlockSize && kBlockSize <= kMaxBlockSize &&
         kBlockSize % kAlignUnit == 0);
  TEST_SYNC_POINT_CALLBACK("Arena::Arena:0", const_cast<size_t*>(&kBlockSize));
  alloc_bytes_remaining_ = sizeof(inline_block_);
  blocks_memory_ += alloc_bytes_remaining_;
  aligned_alloc_ptr_ = inline_block_;
  unaligned_alloc_ptr_ = inline_block_ + alloc_bytes_remaining_;
  if (MemMapping::kHugePageSupported) {
    hugetlb_size_ = huge_page_size;
    if (hugetlb_size_ && kBlockSize > hugetlb_size_) {
      hugetlb_size_ = ((kBlockSize - 1U) / hugetlb_size_ + 1U) * hugetlb_size_;
    }
  }
  if (tracker_ != nullptr) {
    tracker_->Allocate(kInlineSize);
  }
}
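
// Illustration of the huge page rounding in the constructor (hypothetical
// numbers): with kBlockSize == 5 MiB and huge_page_size == 2 MiB,
// hugetlb_size_ becomes ((5 MiB - 1) / 2 MiB + 1) * 2 MiB == 6 MiB, i.e. the
// smallest multiple of the huge page size that can hold one full block.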

Arena::~Arena() {
  if (tracker_ != nullptr) {
    assert(tracker_->is_freed());
    tracker_->FreeMem();
  }
}

char* Arena::AllocateFallback(size_t bytes, bool aligned) {
  if (bytes > kBlockSize / 4) {
    ++irregular_block_num;
    // Object is more than a quarter of our block size. Allocate it separately
    // to avoid wasting too much space in leftover bytes.
    return AllocateNewBlock(bytes);
  }

  // We waste the remaining space in the current block.
  size_t size = 0;
  char* block_head = nullptr;
  if (MemMapping::kHugePageSupported && hugetlb_size_ > 0) {
    size = hugetlb_size_;
    block_head = AllocateFromHugePage(size);
  }
  if (!block_head) {
    size = kBlockSize;
    block_head = AllocateNewBlock(size);
  }
  alloc_bytes_remaining_ = size - bytes;

  if (aligned) {
    aligned_alloc_ptr_ = block_head + bytes;
    unaligned_alloc_ptr_ = block_head + size;
    return block_head;
  } else {
    aligned_alloc_ptr_ = block_head;
    unaligned_alloc_ptr_ = block_head + size - bytes;
    return unaligned_alloc_ptr_;
  }
}
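
// Sketch of how the fresh block is carved up above (hypothetical numbers):
// with size == 4096 and bytes == 100, an aligned request returns block_head
// and leaves aligned_alloc_ptr_ at block_head + 100, while an unaligned
// request returns block_head + 3996 so that later aligned allocations can
// still grow from the front of the block.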

char* Arena::AllocateFromHugePage(size_t bytes) {
  MemMapping mm = MemMapping::AllocateHuge(bytes);
  auto addr = static_cast<char*>(mm.Get());
  if (addr) {
    huge_blocks_.push_back(std::move(mm));
    blocks_memory_ += bytes;
    if (tracker_ != nullptr) {
      tracker_->Allocate(bytes);
    }
  }
  return addr;
}

char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
                             Logger* logger) {
  if (MemMapping::kHugePageSupported && hugetlb_size_ > 0 &&
      huge_page_size > 0 && bytes > 0) {
    // Allocate from a huge page TLB table.
    size_t reserved_size =
        ((bytes - 1U) / huge_page_size + 1U) * huge_page_size;
    assert(reserved_size >= bytes);

    char* addr = AllocateFromHugePage(reserved_size);
    if (addr == nullptr) {
      ROCKS_LOG_WARN(logger,
                     "AllocateAligned fail to allocate huge TLB pages: %s",
                     errnoStr(errno).c_str());
      // fall back to malloc
    } else {
      return addr;
    }
  }

  size_t current_mod =
      reinterpret_cast<uintptr_t>(aligned_alloc_ptr_) & (kAlignUnit - 1);
  size_t slop = (current_mod == 0 ? 0 : kAlignUnit - current_mod);
  size_t needed = bytes + slop;
  char* result;
  if (needed <= alloc_bytes_remaining_) {
    result = aligned_alloc_ptr_ + slop;
    aligned_alloc_ptr_ += needed;
    alloc_bytes_remaining_ -= needed;
  } else {
    // AllocateFallback always returns aligned memory
    result = AllocateFallback(bytes, true /* aligned */);
  }
  assert((reinterpret_cast<uintptr_t>(result) & (kAlignUnit - 1)) == 0);
  return result;
}
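
// Example of the alignment arithmetic above (hypothetical numbers): with
// kAlignUnit == 16 and aligned_alloc_ptr_ at an address whose low bits are
// 0x..0c, current_mod == 12 and slop == 4, so a 100-byte request consumes
// 104 bytes and returns aligned_alloc_ptr_ + 4, which is 16-byte aligned.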

char* Arena::AllocateNewBlock(size_t block_bytes) {
  // NOTE: std::make_unique zero-initializes the block so is not appropriate
  // here
  char* block = new char[block_bytes];
  blocks_.push_back(std::unique_ptr<char[]>(block));

  size_t allocated_size;
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
  allocated_size = malloc_usable_size(block);
#ifndef NDEBUG
  // It's hard to predict what malloc_usable_size() returns.
  // A callback can allow users to change the costed size.
  std::pair<size_t*, size_t*> pair(&allocated_size, &block_bytes);
  TEST_SYNC_POINT_CALLBACK("Arena::AllocateNewBlock:0", &pair);
#endif  // NDEBUG
#else
  allocated_size = block_bytes;
#endif  // ROCKSDB_MALLOC_USABLE_SIZE

  blocks_memory_ += allocated_size;
  if (tracker_ != nullptr) {
    tracker_->Allocate(allocated_size);
  }
  return block;
}

}  // namespace ROCKSDB_NAMESPACE
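
// Illustrative usage sketch (assuming the constructor defaults and the
// Allocate / AllocateAligned methods declared in memory/arena.h):
//
//   ROCKSDB_NAMESPACE::Arena arena;              // kMinBlockSize blocks
//   char* raw = arena.Allocate(128);             // unaligned, carved from the
//                                                // back of the current block
//   char* aligned = arena.AllocateAligned(256);  // kAlignUnit-aligned, carved
//                                                // from the front
//
// All blocks are released when the arena is destroyed; there is no
// per-allocation free.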