// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

// Arena is an implementation of the Allocator class. For a small request,
// it allocates from a block with a pre-defined block size. For a large
// request, it uses malloc to directly obtain the requested size.
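//
// A minimal usage sketch (illustrative only; Arena, Allocate, and
// AllocateAligned are the calls declared below):
//
//   Arena arena;                           // blocks of kMinBlockSize
//   char* a = arena.Allocate(100);         // small request, bump-pointer
//   char* b = arena.AllocateAligned(512);  // aligned request
//   // There is no per-allocation free: all memory is released when
//   // `arena` is destroyed.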

#pragma once
#ifndef OS_WIN
#include <sys/mman.h>
#endif
#include <cassert>
#include <cerrno>
#include <cstddef>
#include <cstdint>
#include <vector>

#include "memory/allocator.h"
#include "util/mutexlock.h"

namespace ROCKSDB_NAMESPACE {

class Arena : public Allocator {
 public:
  // No copying allowed
  Arena(const Arena&) = delete;
  Arena& operator=(const Arena&) = delete;

  static const size_t kInlineSize = 2048;
  static const size_t kMinBlockSize;
  static const size_t kMaxBlockSize;

  // huge_page_size: if 0, don't use huge page TLB. If > 0 (should be set to
  // the supported hugepage size of the system), block allocation will try
  // the huge page TLB first. If that allocation fails, it will fall back to
  // the normal case.
  explicit Arena(size_t block_size = kMinBlockSize,
                 AllocTracker* tracker = nullptr, size_t huge_page_size = 0);
  ~Arena();

  char* Allocate(size_t bytes) override;

  // huge_page_size: if > 0, will try to allocate from the huge page TLB.
  // The argument is the page size of the huge page TLB. Bytes will be
  // rounded up to a multiple of that page size and allocated through mmap
  // with the anonymous and huge page options on; the extra space allocated
  // is wasted. If the allocation fails, it will fall back to the normal
  // case. To enable it, huge pages need to be reserved beforehand, e.g.:
  //   sysctl -w vm.nr_hugepages=20
  // See the Linux doc Documentation/vm/hugetlbpage.txt for details.
  // Since huge page allocation can fail (falling back to the normal case),
  // failure messages are logged to the logger. So when calling with
  // huge_page_tlb_size > 0, we highly recommend passing in a logger;
  // otherwise, the error message will be printed to stderr directly.
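  //
  // An illustrative call, assuming 2 MiB huge pages have been reserved as
  // above (`logger` is a hypothetical Logger*):
  //   char* buf = arena.AllocateAligned(1 << 20, 2 * 1024 * 1024, logger);
  // This rounds the 1 MiB request up to one 2 MiB huge page; if the mmap
  // fails, it falls back to a normal aligned allocation.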
  char* AllocateAligned(size_t bytes, size_t huge_page_size = 0,
                        Logger* logger = nullptr) override;

  // Returns an estimate of the total memory usage of data allocated
  // by the arena (excluding space allocated but not yet used for future
  // allocations).
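  // For example (illustrative, assuming 8-byte pointers): with
  // blocks_memory_ == 8192, blocks_.capacity() == 4, and
  // alloc_bytes_remaining_ == 100, the estimate is
  // 8192 + 4 * 8 - 100 = 8124 bytes.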
  size_t ApproximateMemoryUsage() const {
    return blocks_memory_ + blocks_.capacity() * sizeof(char*) -
           alloc_bytes_remaining_;
  }

  size_t MemoryAllocatedBytes() const { return blocks_memory_; }

  size_t AllocatedAndUnused() const { return alloc_bytes_remaining_; }

  // If an allocation is too big, we'll allocate an irregular block with the
  // same size as that allocation.
  size_t IrregularBlockNum() const { return irregular_block_num; }

  size_t BlockSize() const override { return kBlockSize; }

  bool IsInInlineBlock() const { return blocks_.empty(); }

 private:
  char inline_block_[kInlineSize]
      __attribute__((__aligned__(alignof(max_align_t))));
  // Number of bytes allocated in one block
  const size_t kBlockSize;
  // Array of new[] allocated memory blocks
  using Blocks = std::vector<char*>;
  Blocks blocks_;

  struct MmapInfo {
    void* addr_;
    size_t length_;

    MmapInfo(void* addr, size_t length) : addr_(addr), length_(length) {}
  };
  std::vector<MmapInfo> huge_blocks_;
  size_t irregular_block_num = 0;

  // Stats for the current active block.
  // For each block, we allocate aligned memory chunks from one end and
  // unaligned memory chunks from the other end. Otherwise the memory
  // wasted on alignment would be higher if we allocated both types of
  // memory from one direction.
  char* unaligned_alloc_ptr_ = nullptr;
  char* aligned_alloc_ptr_ = nullptr;
  // How many bytes are left in the currently active block?
  size_t alloc_bytes_remaining_ = 0;

#ifdef MAP_HUGETLB
  size_t hugetlb_size_ = 0;
#endif  // MAP_HUGETLB

  char* AllocateFromHugePage(size_t bytes);
  char* AllocateFallback(size_t bytes, bool aligned);
  char* AllocateNewBlock(size_t block_bytes);

  // Bytes of memory in blocks allocated so far
  size_t blocks_memory_ = 0;
  AllocTracker* tracker_;
};

inline char* Arena::Allocate(size_t bytes) {
  // The semantics of what to return are a bit messy if we allow
  // 0-byte allocations, so we disallow them here (we don't need
  // them for our internal use).
  assert(bytes > 0);
  if (bytes <= alloc_bytes_remaining_) {
    unaligned_alloc_ptr_ -= bytes;
    alloc_bytes_remaining_ -= bytes;
    return unaligned_alloc_ptr_;
  }
  return AllocateFallback(bytes, false /* unaligned */);
}
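
// For example (illustrative): with alloc_bytes_remaining_ == 256,
// Allocate(100) moves unaligned_alloc_ptr_ back by 100 bytes, leaves
// 156 bytes remaining, and returns the moved pointer. A subsequent
// Allocate(300) would no longer fit in the block and would go through
// AllocateFallback(300, false) instead.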

// Check and adjust block_size so that the return value is
//   1. in the range [kMinBlockSize, kMaxBlockSize], and
//   2. a multiple of the alignment unit.
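// For example (illustrative, assuming kMinBlockSize == 4096 and an 8-byte
// alignment unit): OptimizeBlockSize(100) returns 4096, while
// OptimizeBlockSize(8190) rounds up to 8192.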
extern size_t OptimizeBlockSize(size_t block_size);

}  // namespace ROCKSDB_NAMESPACE