mutexlock.h

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once
#include <assert.h>

#include <atomic>
#include <functional>
#include <memory>
#include <mutex>
#include <thread>

#include "port/port.h"
#include "util/fastrange.h"
#include "util/hash.h"

namespace ROCKSDB_NAMESPACE {
// Helper class that locks a mutex on construction and unlocks the mutex when
// the destructor of the MutexLock object is invoked.
//
// Typical usage:
//
//   void MyClass::MyMethod() {
//     MutexLock l(&mu_);  // mu_ is an instance variable
//     ... some complex code, possibly with multiple return paths ...
//   }

class MutexLock {
 public:
  explicit MutexLock(port::Mutex *mu) : mu_(mu) { this->mu_->Lock(); }
  // No copying allowed
  MutexLock(const MutexLock &) = delete;
  void operator=(const MutexLock &) = delete;

  ~MutexLock() { this->mu_->Unlock(); }

 private:
  port::Mutex *const mu_;
};
//
// Acquire a ReadLock on the specified RWMutex.
// The Lock will be automatically released when the
// object goes out of scope.
//
class ReadLock {
 public:
  explicit ReadLock(port::RWMutex *mu) : mu_(mu) { this->mu_->ReadLock(); }
  // No copying allowed
  ReadLock(const ReadLock &) = delete;
  void operator=(const ReadLock &) = delete;

  ~ReadLock() { this->mu_->ReadUnlock(); }

 private:
  port::RWMutex *const mu_;
};
//
// Automatically releases an already-held read lock when the object is
// destroyed. The read lock must be held when the ReadUnlock is constructed.
//
class ReadUnlock {
 public:
  explicit ReadUnlock(port::RWMutex *mu) : mu_(mu) { mu->AssertHeld(); }
  // No copying allowed
  ReadUnlock(const ReadUnlock &) = delete;
  ReadUnlock &operator=(const ReadUnlock &) = delete;

  ~ReadUnlock() { mu_->ReadUnlock(); }

 private:
  port::RWMutex *const mu_;
};
//
// Acquire a WriteLock on the specified RWMutex.
// The Lock will be automatically released when the
// object goes out of scope.
//
class WriteLock {
 public:
  explicit WriteLock(port::RWMutex *mu) : mu_(mu) { this->mu_->WriteLock(); }
  // No copying allowed
  WriteLock(const WriteLock &) = delete;
  void operator=(const WriteLock &) = delete;

  ~WriteLock() { this->mu_->WriteUnlock(); }

 private:
  port::RWMutex *const mu_;
};
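
// Example usage of ReadLock/WriteLock (illustrative; rw_mu_, DoRead, and
// DoWrite are placeholder names, not part of this header). Both guards
// release the lock on every return path:
//
//   void MyClass::ReadPath() {
//     ReadLock l(&rw_mu_);   // shared lock, released at end of scope
//     DoRead();
//   }
//
//   void MyClass::WritePath() {
//     WriteLock l(&rw_mu_);  // exclusive lock, released at end of scope
//     DoWrite();
//   }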
//
// SpinMutex has very low overhead for low-contention cases. Method names
// are chosen so you can use std::unique_lock or std::lock_guard with it.
//
class SpinMutex {
 public:
  SpinMutex() : locked_(false) {}

  bool try_lock() {
    auto currently_locked = locked_.load(std::memory_order_relaxed);
    return !currently_locked &&
           locked_.compare_exchange_weak(currently_locked, true,
                                         std::memory_order_acquire,
                                         std::memory_order_relaxed);
  }

  void lock() {
    for (size_t tries = 0;; ++tries) {
      if (try_lock()) {
        // success
        break;
      }
      port::AsmVolatilePause();
      if (tries > 100) {
        std::this_thread::yield();
      }
    }
  }

  void unlock() { locked_.store(false, std::memory_order_release); }

 private:
  std::atomic<bool> locked_;
};
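
// Example usage (illustrative; spin_mu_, counter_, and Increment are
// placeholder names). Because SpinMutex provides lock()/try_lock()/unlock(),
// it satisfies the standard Lockable requirements, so the standard RAII
// guards work with it directly:
//
//   SpinMutex spin_mu_;
//   int counter_ = 0;
//
//   void Increment() {
//     std::lock_guard<SpinMutex> guard(spin_mu_);  // spins until acquired
//     ++counter_;
//   }  // unlocked when guard is destroyed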
// For preventing false sharing, especially for mutexes.
// NOTE: if a mutex is less than half the size of a cache line, it would
// make more sense for the Striped structure below to pack more than one mutex
// into each cache line, as this would only reduce contention for the same
// amount of space and cache sharing. However, a mutex is often 40 bytes out
// of a 64 byte cache line.
template <class T>
struct ALIGN_AS(CACHE_LINE_SIZE) CacheAlignedWrapper {
  T obj_;
};

template <class T>
struct Unwrap {
  using type = T;
  static type &Go(T &t) { return t; }
};

template <class T>
struct Unwrap<CacheAlignedWrapper<T>> {
  using type = T;
  static type &Go(CacheAlignedWrapper<T> &t) { return t.obj_; }
};
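
// Illustrative effect of the wrapper (the 64-byte figure is an assumption
// for a typical cache line; actual sizes depend on the platform):
//
//   // Alignment rounds each wrapped object up to a full cache line, so
//   // adjacent stripes in an array never share a line:
//   static_assert(alignof(CacheAlignedWrapper<std::mutex>) >= CACHE_LINE_SIZE);
//
//   // Unwrap lets code hand out a plain T& whether or not T is wrapped:
//   CacheAlignedWrapper<std::mutex> w;
//   std::mutex &m = Unwrap<CacheAlignedWrapper<std::mutex>>::Go(w);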
//
// Inspired by Guava: https://github.com/google/guava/wiki/StripedExplained
// A striped Lock. This offers the underlying lock striping similar
// to that of ConcurrentHashMap in a reusable form, and extends it for
// semaphores and read-write locks. Conceptually, lock striping is the
// technique of dividing a lock into many "stripes", increasing the
// granularity of a single lock and allowing independent operations to lock
// different stripes and proceed concurrently, instead of creating contention
// for a single lock.
//
template <class T, class Key = Slice, class Hash = SliceNPHasher64>
class Striped {
 public:
  explicit Striped(size_t stripe_count)
      : stripe_count_(stripe_count), data_(new T[stripe_count]) {}

  using Unwrapped = typename Unwrap<T>::type;
  Unwrapped &Get(const Key &key, uint64_t seed = 0) {
    size_t index = FastRangeGeneric(hash_(key, seed), stripe_count_);
    return Unwrap<T>::Go(data_[index]);
  }

  size_t ApproximateMemoryUsage() const {
    // NOTE: could use malloc_usable_size() here, but that could count unmapped
    // pages and could mess up unit test OccLockBucketsTest::CacheAligned
    return sizeof(*this) + stripe_count_ * sizeof(T);
  }

 private:
  size_t stripe_count_;
  std::unique_ptr<T[]> data_;
  Hash hash_;
};
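
// Example usage (illustrative; the stripe count, LockKey, and stripes are
// placeholder names). Wrapping the mutex in CacheAlignedWrapper keeps each
// stripe on its own cache line, while Get() still returns a plain
// port::Mutex& thanks to Unwrap. Equal keys always map to the same stripe:
//
//   Striped<CacheAlignedWrapper<port::Mutex>> stripes(/*stripe_count=*/16);
//
//   void LockKey(const Slice &key) {
//     port::Mutex &mu = stripes.Get(key);  // same key -> same stripe
//     MutexLock l(&mu);
//     // ... operate on state guarded by this key's stripe ...
//   }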

}  // namespace ROCKSDB_NAMESPACE