// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

// Arena is an implementation of the Allocator class. For a small request,
// it allocates memory out of a block with a pre-defined block size. For a
// large request, it uses malloc to directly obtain the requested size.

#pragma once

#include <cassert>
#include <cstddef>
#include <deque>
#include <memory>

#include "memory/allocator.h"
#include "port/mmap.h"
#include "rocksdb/env.h"

namespace ROCKSDB_NAMESPACE {
class Arena : public Allocator {
 public:
  // No copying allowed
  Arena(const Arena&) = delete;
  void operator=(const Arena&) = delete;

  static constexpr size_t kInlineSize = 2048;
  static constexpr size_t kMinBlockSize = 4096;
  static constexpr size_t kMaxBlockSize = 2u << 30;
  static constexpr unsigned kAlignUnit = alignof(std::max_align_t);
  static_assert((kAlignUnit & (kAlignUnit - 1)) == 0,
                "kAlignUnit must be a power of 2");
  // huge_page_size: if 0, don't use huge page TLB. If > 0 (should be set to
  // the supported huge page size of the system), block allocation will try
  // huge page TLB first. If that allocation fails, it will fall back to the
  // normal case.
  explicit Arena(size_t block_size = kMinBlockSize,
                 AllocTracker* tracker = nullptr, size_t huge_page_size = 0);
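  //
  // A minimal construction sketch (2 MiB is an assumed huge page size, not a
  // requirement of the API):
  //
  //   Arena arena(Arena::kMinBlockSize, /*tracker=*/nullptr,
  //               /*huge_page_size=*/2 * 1024 * 1024);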
  ~Arena();

  char* Allocate(size_t bytes) override;
  // huge_page_size: if > 0, will try to allocate from huge page TLB.
  // The argument is the page size to use for the huge page TLB. Bytes will
  // be rounded up to a multiple of that page size and allocated through
  // mmap with the anonymous and huge page options on; the extra space
  // allocated will be wasted. If the allocation fails, it will fall back to
  // the normal case. To enable it, huge pages need to be reserved in
  // advance, e.g.:
  //   sysctl -w vm.nr_hugepages=20
  // See the Linux doc Documentation/vm/hugetlbpage.txt for details.
  // Huge page allocation can fail, in which case it falls back to the
  // normal case and the messages are logged to the logger. So when calling
  // with huge_page_size > 0, we highly recommend passing in a logger;
  // otherwise, the error messages will be printed to stderr directly.
  char* AllocateAligned(size_t bytes, size_t huge_page_size = 0,
                        Logger* logger = nullptr) override;
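  //
  // An illustrative sketch of both call forms (the sizes are assumptions
  // made for the example, and `logger` is a hypothetical Logger*):
  //
  //   Arena arena;
  //   char* a = arena.AllocateAligned(256);
  //   char* b = arena.AllocateAligned(1 << 20, 2 * 1024 * 1024, logger);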
  // Returns an estimate of the total memory usage of data allocated
  // by the arena (excluding space that is allocated but not yet used for
  // future allocations).
  size_t ApproximateMemoryUsage() const {
    return blocks_memory_ + blocks_.size() * sizeof(char*) -
           alloc_bytes_remaining_;
  }

  size_t MemoryAllocatedBytes() const { return blocks_memory_; }

  size_t AllocatedAndUnused() const { return alloc_bytes_remaining_; }
  // If an allocation is too big, we'll allocate an irregular block of the
  // same size as that allocation.
  size_t IrregularBlockNum() const { return irregular_block_num_; }

  size_t BlockSize() const override { return kBlockSize; }

  bool IsInInlineBlock() const {
    return blocks_.empty() && huge_blocks_.empty();
  }
  // Check and adjust block_size so that the return value is
  // 1. in the range of [kMinBlockSize, kMaxBlockSize], and
  // 2. a multiple of the align unit.
  static size_t OptimizeBlockSize(size_t block_size);
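  //
  // For example, assuming kAlignUnit == 16 (typical for x86-64):
  //
  //   OptimizeBlockSize(1000);  // -> 4096, clamped up to kMinBlockSize
  //   OptimizeBlockSize(5000);  // -> 5008, rounded up to a multiple of 16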
 private:
  alignas(std::max_align_t) char inline_block_[kInlineSize];
  // Number of bytes allocated in one block
  const size_t kBlockSize;
  // Allocated memory blocks
  std::deque<std::unique_ptr<char[]>> blocks_;
  // Huge page allocations
  std::deque<MemMapping> huge_blocks_;
  size_t irregular_block_num_ = 0;
  // Stats for the current active block.
  // For each block, we allocate aligned memory chunks from one end and
  // unaligned memory chunks from the other end. Otherwise, the memory
  // wasted on alignment would be higher if we allocated both types of
  // memory from one direction.
  char* unaligned_alloc_ptr_ = nullptr;
  char* aligned_alloc_ptr_ = nullptr;
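  // A sketch of the resulting layout within one block (illustrative only,
  // not to scale):
  //
  //   +--------------------+........................+---------------------+
  //   | aligned chunks --> | alloc_bytes_remaining_ | <-- unaligned chunks|
  //   +--------------------+........................+---------------------+
  //                        ^                        ^
  //               aligned_alloc_ptr_       unaligned_alloc_ptr_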
  // How many bytes are left in the currently active block?
  size_t alloc_bytes_remaining_ = 0;

  size_t hugetlb_size_ = 0;

  char* AllocateFromHugePage(size_t bytes);
  char* AllocateFallback(size_t bytes, bool aligned);
  char* AllocateNewBlock(size_t block_bytes);

  // Bytes of memory in blocks allocated so far
  size_t blocks_memory_ = 0;

  // Non-owned
  AllocTracker* tracker_;
};

inline char* Arena::Allocate(size_t bytes) {
  // The semantics of what to return are a bit messy if we allow
  // 0-byte allocations, so we disallow them here (we don't need
  // them for our internal use).
  assert(bytes > 0);
  if (bytes <= alloc_bytes_remaining_) {
    unaligned_alloc_ptr_ -= bytes;
    alloc_bytes_remaining_ -= bytes;
    return unaligned_alloc_ptr_;
  }
  return AllocateFallback(bytes, false /* unaligned */);
}
// Like std::destroy_at but a callable type
template <typename T>
struct Destroyer {
  void operator()(T* ptr) { ptr->~T(); }
};

// Like std::unique_ptr but only placement-deletes the object (for
// objects allocated on an arena).
template <typename T>
using ScopedArenaPtr = std::unique_ptr<T, Destroyer<T>>;
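//
// A minimal usage sketch (`Foo` is a hypothetical type; placement new
// requires <new>):
//
//   Arena arena;
//   char* mem = arena.AllocateAligned(sizeof(Foo));
//   ScopedArenaPtr<Foo> foo(new (mem) Foo());
//   // ~Foo() runs when `foo` goes out of scope; the storage itself is
//   // reclaimed only when `arena` is destroyed.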
}  // namespace ROCKSDB_NAMESPACE