tiered_secondary_cache.cc 5.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125
  1. // Copyright (c) Meta Platforms, Inc. and affiliates.
  2. // This source code is licensed under both the GPLv2 (found in the
  3. // COPYING file in the root directory) and Apache 2.0 License
  4. // (found in the LICENSE.Apache file in the root directory).
  5. #include "cache/tiered_secondary_cache.h"
  6. #include "monitoring/statistics_impl.h"
  7. namespace ROCKSDB_NAMESPACE {
  8. // Creation callback for use in the lookup path. It calls the upper layer
  9. // create_cb to create the object, and optionally calls the compressed
  10. // secondary cache InsertSaved to save the compressed block. If
  11. // advise_erase is set, it means the primary cache wants the block to be
  12. // erased in the secondary cache, so we skip calling InsertSaved.
  13. //
  14. // For the time being, we assume that all blocks in the nvm tier belong to
  15. // the primary block cache (i.e CacheTier::kVolatileTier). That can be changed
  16. // if we implement demotion from the compressed secondary cache to the nvm
  17. // cache in the future.
  18. Status TieredSecondaryCache::MaybeInsertAndCreate(
  19. const Slice& data, CompressionType type, CacheTier source,
  20. Cache::CreateContext* ctx, MemoryAllocator* allocator,
  21. Cache::ObjectPtr* out_obj, size_t* out_charge) {
  22. TieredSecondaryCache::CreateContext* context =
  23. static_cast<TieredSecondaryCache::CreateContext*>(ctx);
  24. assert(source == CacheTier::kVolatileTier);
  25. if (!context->advise_erase && type != kNoCompression) {
  26. // Attempt to insert into compressed secondary cache
  27. // TODO: Don't hardcode the source
  28. context->comp_sec_cache->InsertSaved(*context->key, data, type, source)
  29. .PermitUncheckedError();
  30. RecordTick(context->stats, COMPRESSED_SECONDARY_CACHE_PROMOTIONS);
  31. } else {
  32. RecordTick(context->stats, COMPRESSED_SECONDARY_CACHE_PROMOTION_SKIPS);
  33. }
  34. // Primary cache will accept the object, so call its helper to create
  35. // the object
  36. return context->helper->create_cb(data, type, source, context->inner_ctx,
  37. allocator, out_obj, out_charge);
  38. }
// The lookup first looks up in the compressed secondary cache. If it's a
// miss, then the nvm cache lookup is called. The cache item helper and create
// context are wrapped in order to intercept the creation callback to make
// the decision on promoting to the compressed secondary cache.
std::unique_ptr<SecondaryCacheResultHandle> TieredSecondaryCache::Lookup(
    const Slice& key, const Cache::CacheItemHelper* helper,
    Cache::CreateContext* create_context, bool wait, bool advise_erase,
    Statistics* stats, bool& kept_in_sec_cache) {
  // The inner (compressed) cache's kept_in_sec_cache result is discarded;
  // this function always reports true to the caller (see below).
  bool dummy = false;
  std::unique_ptr<SecondaryCacheResultHandle> result =
      target()->Lookup(key, helper, create_context, wait, advise_erase, stats,
                       /*kept_in_sec_cache=*/dummy);
  // We never want the item to spill back into the secondary cache
  kept_in_sec_cache = true;
  if (result) {
    // Hit in the compressed secondary cache; it is expected to be ready
    // immediately.
    assert(result->IsReady());
    return result;
  }
  // If wait is true, then we can be a bit more efficient and avoid a memory
  // allocation for the CreateContext.
  const Cache::CacheItemHelper* outer_helper =
      TieredSecondaryCache::GetHelper();
  if (wait) {
    // Synchronous path: the nvm lookup completes before we return, so a
    // stack-allocated CreateContext is sufficient.
    TieredSecondaryCache::CreateContext ctx;
    ctx.key = &key;
    ctx.advise_erase = advise_erase;
    ctx.helper = helper;
    ctx.inner_ctx = create_context;
    ctx.comp_sec_cache = target();
    ctx.stats = stats;
    return nvm_sec_cache_->Lookup(key, outer_helper, &ctx, wait, advise_erase,
                                  stats, kept_in_sec_cache);
  }
  // If wait is false, i.e it's an async lookup, we have to allocate a result
  // handle for tracking purposes. Embed the CreateContext inside the handle
  // so we need only allocate memory once instead of twice.
  std::unique_ptr<ResultHandle> handle(new ResultHandle());
  handle->ctx()->key = &key;
  handle->ctx()->advise_erase = advise_erase;
  handle->ctx()->helper = helper;
  handle->ctx()->inner_ctx = create_context;
  handle->ctx()->comp_sec_cache = target();
  handle->ctx()->stats = stats;
  handle->SetInnerHandle(
      nvm_sec_cache_->Lookup(key, outer_helper, handle->ctx(), wait,
                             advise_erase, stats, kept_in_sec_cache));
  if (!handle->inner_handle()) {
    // Miss in the nvm cache as well; drop the wrapper and return nullptr.
    handle.reset();
  } else {
    // Transfer ownership of the wrapper to the returned handle; the caller
    // completes it later via WaitAll.
    result.reset(handle.release());
  }
  return result;
}
  92. // Call the nvm cache WaitAll to complete the lookups
  93. void TieredSecondaryCache::WaitAll(
  94. std::vector<SecondaryCacheResultHandle*> handles) {
  95. std::vector<SecondaryCacheResultHandle*> nvm_handles;
  96. std::vector<ResultHandle*> my_handles;
  97. nvm_handles.reserve(handles.size());
  98. for (auto handle : handles) {
  99. // The handle could belong to the compressed secondary cache. Skip if
  100. // that's the case.
  101. if (handle->IsReady()) {
  102. continue;
  103. }
  104. ResultHandle* hdl = static_cast<ResultHandle*>(handle);
  105. nvm_handles.push_back(hdl->inner_handle());
  106. my_handles.push_back(hdl);
  107. }
  108. nvm_sec_cache_->WaitAll(nvm_handles);
  109. for (auto handle : my_handles) {
  110. assert(handle->inner_handle()->IsReady());
  111. handle->Complete();
  112. }
  113. }
  114. } // namespace ROCKSDB_NAMESPACE