// cache_reservation_manager.cc
  1. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
  2. // This source code is licensed under both the GPLv2 (found in the
  3. // COPYING file in the root directory) and Apache 2.0 License
  4. // (found in the LICENSE.Apache file in the root directory).
  5. //
  6. // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
  7. // Use of this source code is governed by a BSD-style license that can be
  8. // found in the LICENSE file. See the AUTHORS file for names of contributors.
  9. #include "cache/cache_reservation_manager.h"
  10. #include <cassert>
  11. #include <cstddef>
  12. #include <cstring>
  13. #include <memory>
  14. #include "rocksdb/cache.h"
  15. #include "rocksdb/slice.h"
  16. #include "rocksdb/status.h"
  17. #include "table/block_based/reader_common.h"
  18. #include "util/coding.h"
  19. namespace ROCKSDB_NAMESPACE {
template <CacheEntryRole R>
CacheReservationManagerImpl<R>::CacheReservationHandle::CacheReservationHandle(
    std::size_t incremental_memory_used,
    std::shared_ptr<CacheReservationManagerImpl> cache_res_mgr)
    : incremental_memory_used_(incremental_memory_used) {
  // The handle keeps its manager alive via shared ownership so that the
  // destructor can always release the reservation it represents.
  assert(cache_res_mgr);
  cache_res_mgr_ = cache_res_mgr;
}
  28. template <CacheEntryRole R>
  29. CacheReservationManagerImpl<
  30. R>::CacheReservationHandle::~CacheReservationHandle() {
  31. Status s = cache_res_mgr_->ReleaseCacheReservation(incremental_memory_used_);
  32. s.PermitUncheckedError();
  33. }
template <CacheEntryRole R>
CacheReservationManagerImpl<R>::CacheReservationManagerImpl(
    std::shared_ptr<Cache> cache, bool delayed_decrease)
    : cache_(cache),
      delayed_decrease_(delayed_decrease),
      cache_allocated_size_(0),
      memory_used_(0) {
  // A valid cache is a hard requirement; every reservation is charged to it.
  assert(cache != nullptr);
}
  43. template <CacheEntryRole R>
  44. CacheReservationManagerImpl<R>::~CacheReservationManagerImpl() {
  45. for (auto* handle : dummy_handles_) {
  46. cache_.ReleaseAndEraseIfLastRef(handle);
  47. }
  48. }
template <CacheEntryRole R>
Status CacheReservationManagerImpl<R>::UpdateCacheReservation(
    std::size_t new_mem_used) {
  // Record the caller-reported memory usage, then grow or shrink the number
  // of dummy cache entries so the charged size tracks new_mem_used.
  memory_used_ = new_mem_used;
  std::size_t cur_cache_allocated_size =
      cache_allocated_size_.load(std::memory_order_relaxed);
  if (new_mem_used == cur_cache_allocated_size) {
    return Status::OK();
  } else if (new_mem_used > cur_cache_allocated_size) {
    Status s = IncreaseCacheReservation(new_mem_used);
    return s;
  } else {
    // In delayed decrease mode, we don't decrease cache reservation
    // until the memory usage is less than 3/4 of what we reserve
    // in the cache.
    // We do this because
    // (1) Dummy entry insertion is expensive in block cache
    // (2) Delayed releasing previously inserted dummy entries can save such
    // expensive dummy entry insertion on memory increase in the near future,
    // which is likely to happen when the memory usage is greater than or equal
    // to 3/4 of what we reserve
    if (delayed_decrease_ && new_mem_used >= cur_cache_allocated_size / 4 * 3) {
      return Status::OK();
    } else {
      Status s = DecreaseCacheReservation(new_mem_used);
      return s;
    }
  }
}
  78. template <CacheEntryRole R>
  79. Status CacheReservationManagerImpl<R>::MakeCacheReservation(
  80. std::size_t incremental_memory_used,
  81. std::unique_ptr<CacheReservationManager::CacheReservationHandle>* handle) {
  82. assert(handle);
  83. Status s =
  84. UpdateCacheReservation(GetTotalMemoryUsed() + incremental_memory_used);
  85. (*handle).reset(new CacheReservationManagerImpl::CacheReservationHandle(
  86. incremental_memory_used,
  87. std::enable_shared_from_this<
  88. CacheReservationManagerImpl<R>>::shared_from_this()));
  89. return s;
  90. }
  91. template <CacheEntryRole R>
  92. Status CacheReservationManagerImpl<R>::ReleaseCacheReservation(
  93. std::size_t incremental_memory_used) {
  94. assert(GetTotalMemoryUsed() >= incremental_memory_used);
  95. std::size_t updated_total_mem_used =
  96. GetTotalMemoryUsed() - incremental_memory_used;
  97. Status s = UpdateCacheReservation(updated_total_mem_used);
  98. return s;
  99. }
  100. template <CacheEntryRole R>
  101. Status CacheReservationManagerImpl<R>::IncreaseCacheReservation(
  102. std::size_t new_mem_used) {
  103. Status return_status = Status::OK();
  104. while (new_mem_used > cache_allocated_size_.load(std::memory_order_relaxed)) {
  105. Cache::Handle* handle = nullptr;
  106. return_status = cache_.Insert(GetNextCacheKey(), kSizeDummyEntry, &handle);
  107. if (return_status != Status::OK()) {
  108. return return_status;
  109. }
  110. dummy_handles_.push_back(handle);
  111. cache_allocated_size_ += kSizeDummyEntry;
  112. }
  113. return return_status;
  114. }
  115. template <CacheEntryRole R>
  116. Status CacheReservationManagerImpl<R>::DecreaseCacheReservation(
  117. std::size_t new_mem_used) {
  118. Status return_status = Status::OK();
  119. // Decrease to the smallest multiple of kSizeDummyEntry that is greater than
  120. // or equal to new_mem_used We do addition instead of new_mem_used <=
  121. // cache_allocated_size_.load(std::memory_order_relaxed) - kSizeDummyEntry to
  122. // avoid underflow of size_t when cache_allocated_size_ = 0
  123. while (new_mem_used + kSizeDummyEntry <=
  124. cache_allocated_size_.load(std::memory_order_relaxed)) {
  125. assert(!dummy_handles_.empty());
  126. auto* handle = dummy_handles_.back();
  127. cache_.ReleaseAndEraseIfLastRef(handle);
  128. dummy_handles_.pop_back();
  129. cache_allocated_size_ -= kSizeDummyEntry;
  130. }
  131. return return_status;
  132. }
  133. template <CacheEntryRole R>
  134. std::size_t CacheReservationManagerImpl<R>::GetTotalReservedCacheSize() {
  135. return cache_allocated_size_.load(std::memory_order_relaxed);
  136. }
template <CacheEntryRole R>
std::size_t CacheReservationManagerImpl<R>::GetTotalMemoryUsed() {
  // The most recent memory usage reported through UpdateCacheReservation().
  return memory_used_;
}
template <CacheEntryRole R>
Slice CacheReservationManagerImpl<R>::GetNextCacheKey() {
  // Calling this function will have the side-effect of changing the
  // underlying cache_key_ that is shared among other keys generated from
  // this function. Therefore please make sure the previous keys are
  // saved/copied before calling this function.
  cache_key_ = CacheKey::CreateUniqueForCacheLifetime(cache_.get());
  return cache_key_.AsSlice();
}
template <CacheEntryRole R>
const Cache::CacheItemHelper*
CacheReservationManagerImpl<R>::TEST_GetCacheItemHelperForRole() {
  // Test-only accessor: exposes the cache item helper used for role R's
  // dummy entries.
  return CacheInterface::GetHelper();
}
// Explicit template instantiations for every CacheEntryRole that the rest of
// the codebase reserves cache space for; keeps the template definitions out
// of the header.
template class CacheReservationManagerImpl<
    CacheEntryRole::kBlockBasedTableReader>;
template class CacheReservationManagerImpl<
    CacheEntryRole::kCompressionDictionaryBuildingBuffer>;
template class CacheReservationManagerImpl<CacheEntryRole::kFilterConstruction>;
template class CacheReservationManagerImpl<CacheEntryRole::kMisc>;
template class CacheReservationManagerImpl<CacheEntryRole::kWriteBuffer>;
template class CacheReservationManagerImpl<CacheEntryRole::kFileMetadata>;
template class CacheReservationManagerImpl<CacheEntryRole::kBlobCache>;
  164. } // namespace ROCKSDB_NAMESPACE