// db_impl_debug.cc
  1. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
  2. // This source code is licensed under both the GPLv2 (found in the
  3. // COPYING file in the root directory) and Apache 2.0 License
  4. // (found in the LICENSE.Apache file in the root directory).
  5. //
  6. // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
  7. // Use of this source code is governed by a BSD-style license that can be
  8. // found in the LICENSE file. See the AUTHORS file for names of contributors.
  9. #ifndef NDEBUG
  10. #include "db/column_family.h"
  11. #include "db/db_impl/db_impl.h"
  12. #include "db/error_handler.h"
  13. #include "monitoring/thread_status_updater.h"
  14. #include "util/cast_util.h"
  15. namespace ROCKSDB_NAMESPACE {
// Returns the total size, in bytes, of all level-0 files of the default
// column family. Holds the DB mutex while reading the current version.
uint64_t DBImpl::TEST_GetLevel0TotalSize() {
  InstrumentedMutexLock l(&mutex_);
  return default_cf_handle_->cfd()->current()->storage_info()->NumLevelBytes(0);
}
// Forces a WAL switch. Enters the write thread as an unbatched writer first
// (via TEST_BeginWrite) so the switch cannot interleave with concurrent
// writes, mirroring how a real write path would perform it.
void DBImpl::TEST_SwitchWAL() {
  WriteContext write_context;
  InstrumentedMutexLock l(&mutex_);
  void* writer = TEST_BeginWrite();
  SwitchWAL(&write_context);
  TEST_EndWrite(writer);
}
  27. bool DBImpl::TEST_WALBufferIsEmpty(bool lock) {
  28. if (lock) {
  29. log_write_mutex_.Lock();
  30. }
  31. log::Writer* cur_log_writer = logs_.back().writer;
  32. auto res = cur_log_writer->TEST_BufferIsEmpty();
  33. if (lock) {
  34. log_write_mutex_.Unlock();
  35. }
  36. return res;
  37. }
  38. int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes(
  39. ColumnFamilyHandle* column_family) {
  40. ColumnFamilyData* cfd;
  41. if (column_family == nullptr) {
  42. cfd = default_cf_handle_->cfd();
  43. } else {
  44. auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
  45. cfd = cfh->cfd();
  46. }
  47. InstrumentedMutexLock l(&mutex_);
  48. return cfd->current()->storage_info()->MaxNextLevelOverlappingBytes();
  49. }
  50. void DBImpl::TEST_GetFilesMetaData(
  51. ColumnFamilyHandle* column_family,
  52. std::vector<std::vector<FileMetaData>>* metadata) {
  53. auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
  54. auto cfd = cfh->cfd();
  55. InstrumentedMutexLock l(&mutex_);
  56. metadata->resize(NumberLevels());
  57. for (int level = 0; level < NumberLevels(); level++) {
  58. const std::vector<FileMetaData*>& files =
  59. cfd->current()->storage_info()->LevelFiles(level);
  60. (*metadata)[level].clear();
  61. for (const auto& f : files) {
  62. (*metadata)[level].push_back(*f);
  63. }
  64. }
  65. }
// Returns the file number of the current MANIFEST.
uint64_t DBImpl::TEST_Current_Manifest_FileNo() {
  return versions_->manifest_file_number();
}
// Returns the next file number the version set will hand out.
uint64_t DBImpl::TEST_Current_Next_FileNo() {
  return versions_->current_next_file_number();
}
  72. Status DBImpl::TEST_CompactRange(int level, const Slice* begin,
  73. const Slice* end,
  74. ColumnFamilyHandle* column_family,
  75. bool disallow_trivial_move) {
  76. ColumnFamilyData* cfd;
  77. if (column_family == nullptr) {
  78. cfd = default_cf_handle_->cfd();
  79. } else {
  80. auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
  81. cfd = cfh->cfd();
  82. }
  83. int output_level =
  84. (cfd->ioptions()->compaction_style == kCompactionStyleUniversal ||
  85. cfd->ioptions()->compaction_style == kCompactionStyleFIFO)
  86. ? level
  87. : level + 1;
  88. return RunManualCompaction(cfd, level, output_level, CompactRangeOptions(),
  89. begin, end, true, disallow_trivial_move,
  90. port::kMaxUint64 /*max_file_num_to_ignore*/);
  91. }
// Forces a memtable switch for `cfd` (default CF when nullptr). Enters the
// write thread unbatched; with two write queues the non-memtable write
// thread is entered as well so the switch is exclusive on both queues.
Status DBImpl::TEST_SwitchMemtable(ColumnFamilyData* cfd) {
  WriteContext write_context;
  InstrumentedMutexLock l(&mutex_);
  if (cfd == nullptr) {
    cfd = default_cf_handle_->cfd();
  }
  Status s;
  void* writer = TEST_BeginWrite();
  if (two_write_queues_) {
    WriteThread::Writer nonmem_w;
    // Hold exclusive access to the second (non-memtable) write queue too.
    nonmem_write_thread_.EnterUnbatched(&nonmem_w, &mutex_);
    s = SwitchMemtable(cfd, &write_context);
    nonmem_write_thread_.ExitUnbatched(&nonmem_w);
  } else {
    s = SwitchMemtable(cfd, &write_context);
  }
  TEST_EndWrite(writer);
  return s;
}
  111. Status DBImpl::TEST_FlushMemTable(bool wait, bool allow_write_stall,
  112. ColumnFamilyHandle* cfh) {
  113. FlushOptions fo;
  114. fo.wait = wait;
  115. fo.allow_write_stall = allow_write_stall;
  116. ColumnFamilyData* cfd;
  117. if (cfh == nullptr) {
  118. cfd = default_cf_handle_->cfd();
  119. } else {
  120. auto cfhi = reinterpret_cast<ColumnFamilyHandleImpl*>(cfh);
  121. cfd = cfhi->cfd();
  122. }
  123. return FlushMemTable(cfd, fo, FlushReason::kTest);
  124. }
// Flushes the memtable of `cfd` with the caller-supplied flush options and
// FlushReason::kTest.
Status DBImpl::TEST_FlushMemTable(ColumnFamilyData* cfd,
                                  const FlushOptions& flush_opts) {
  return FlushMemTable(cfd, flush_opts, FlushReason::kTest);
}
// Atomically flushes the memtables of all column families in `cfds` with
// FlushReason::kTest.
Status DBImpl::TEST_AtomicFlushMemTables(
    const autovector<ColumnFamilyData*>& cfds, const FlushOptions& flush_opts) {
  return AtomicFlushMemTables(cfds, flush_opts, FlushReason::kTest);
}
  133. Status DBImpl::TEST_WaitForFlushMemTable(ColumnFamilyHandle* column_family) {
  134. ColumnFamilyData* cfd;
  135. if (column_family == nullptr) {
  136. cfd = default_cf_handle_->cfd();
  137. } else {
  138. auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
  139. cfd = cfh->cfd();
  140. }
  141. return WaitForFlushMemTable(cfd, nullptr, false);
  142. }
  143. Status DBImpl::TEST_WaitForCompact(bool wait_unscheduled) {
  144. // Wait until the compaction completes
  145. // TODO: a bug here. This function actually does not necessarily
  146. // wait for compact. It actually waits for scheduled compaction
  147. // OR flush to finish.
  148. InstrumentedMutexLock l(&mutex_);
  149. while ((bg_bottom_compaction_scheduled_ || bg_compaction_scheduled_ ||
  150. bg_flush_scheduled_ ||
  151. (wait_unscheduled && unscheduled_compactions_)) &&
  152. (error_handler_.GetBGError() == Status::OK())) {
  153. bg_cv_.Wait();
  154. }
  155. return error_handler_.GetBGError();
  156. }
// Acquires the DB mutex; pair with TEST_UnlockMutex.
void DBImpl::TEST_LockMutex() { mutex_.Lock(); }
// Releases the DB mutex acquired with TEST_LockMutex.
void DBImpl::TEST_UnlockMutex() { mutex_.Unlock(); }
  159. void* DBImpl::TEST_BeginWrite() {
  160. auto w = new WriteThread::Writer();
  161. write_thread_.EnterUnbatched(w, &mutex_);
  162. return reinterpret_cast<void*>(w);
  163. }
  164. void DBImpl::TEST_EndWrite(void* w) {
  165. auto writer = reinterpret_cast<WriteThread::Writer*>(w);
  166. write_thread_.ExitUnbatched(writer);
  167. delete writer;
  168. }
// Returns how many retired WAL writers are queued for freeing. Holds the DB
// mutex while reading.
size_t DBImpl::TEST_LogsToFreeSize() {
  InstrumentedMutexLock l(&mutex_);
  return logs_to_free_.size();
}
// Returns the file number of the current WAL. Holds the DB mutex.
uint64_t DBImpl::TEST_LogfileNumber() {
  InstrumentedMutexLock l(&mutex_);
  return logfile_number_;
}
  177. Status DBImpl::TEST_GetAllImmutableCFOptions(
  178. std::unordered_map<std::string, const ImmutableCFOptions*>* iopts_map) {
  179. std::vector<std::string> cf_names;
  180. std::vector<const ImmutableCFOptions*> iopts;
  181. {
  182. InstrumentedMutexLock l(&mutex_);
  183. for (auto cfd : *versions_->GetColumnFamilySet()) {
  184. cf_names.push_back(cfd->GetName());
  185. iopts.push_back(cfd->ioptions());
  186. }
  187. }
  188. iopts_map->clear();
  189. for (size_t i = 0; i < cf_names.size(); ++i) {
  190. iopts_map->insert({cf_names[i], iopts[i]});
  191. }
  192. return Status::OK();
  193. }
// Returns the minimum WAL number that still contains an outstanding
// prepared (2PC) section, as tracked by logs_with_prep_tracker_.
uint64_t DBImpl::TEST_FindMinLogContainingOutstandingPrep() {
  return logs_with_prep_tracker_.FindMinLogContainingOutstandingPrep();
}
// Forwards to the prep tracker's count of completed prepared sections.
size_t DBImpl::TEST_PreparedSectionCompletedSize() {
  return logs_with_prep_tracker_.TEST_PreparedSectionCompletedSize();
}
// Forwards to the prep tracker's count of WALs with prepared sections.
size_t DBImpl::TEST_LogsWithPrepSize() {
  return logs_with_prep_tracker_.TEST_LogsWithPrepSize();
}
// Returns the minimum prepare-log number referenced by any memtable,
// considering all memtables (the empty exclusion list excludes none).
uint64_t DBImpl::TEST_FindMinPrepLogReferencedByMemTable() {
  autovector<MemTable*> empty_list;
  return FindMinPrepLogReferencedByMemTable(versions_.get(), nullptr,
                                            empty_list);
}
  208. Status DBImpl::TEST_GetLatestMutableCFOptions(
  209. ColumnFamilyHandle* column_family, MutableCFOptions* mutable_cf_options) {
  210. InstrumentedMutexLock l(&mutex_);
  211. auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
  212. *mutable_cf_options = *cfh->cfd()->GetLatestMutableCFOptions();
  213. return Status::OK();
  214. }
// Returns the current background compaction concurrency limit. Holds the DB
// mutex while computing the job limits.
int DBImpl::TEST_BGCompactionsAllowed() const {
  InstrumentedMutexLock l(&mutex_);
  return GetBGJobLimits().max_compactions;
}
// Returns the current background flush concurrency limit. Holds the DB
// mutex while computing the job limits.
int DBImpl::TEST_BGFlushesAllowed() const {
  InstrumentedMutexLock l(&mutex_);
  return GetBGJobLimits().max_flushes;
}
  223. SequenceNumber DBImpl::TEST_GetLastVisibleSequence() const {
  224. if (last_seq_same_as_publish_seq_) {
  225. return versions_->LastSequence();
  226. } else {
  227. return versions_->LastAllocatedSequence();
  228. }
  229. }
// Returns the WAL preallocation block size that would be used for the given
// write buffer size. Holds the DB mutex while delegating.
size_t DBImpl::TEST_GetWalPreallocateBlockSize(
    uint64_t write_buffer_size) const {
  InstrumentedMutexLock l(&mutex_);
  return GetWalPreallocateBlockSize(write_buffer_size);
}
// Waits for the stats-dump background thread to run once, invoking
// `callback` via its TEST_WaitForRun hook. No-op if the thread is absent.
void DBImpl::TEST_WaitForDumpStatsRun(std::function<void()> callback) const {
  if (thread_dump_stats_ != nullptr) {
    thread_dump_stats_->TEST_WaitForRun(callback);
  }
}
// Waits for the stats-persist background thread to run once, invoking
// `callback` via its TEST_WaitForRun hook. No-op if the thread is absent.
void DBImpl::TEST_WaitForPersistStatsRun(std::function<void()> callback) const {
  if (thread_persist_stats_ != nullptr) {
    thread_persist_stats_->TEST_WaitForRun(callback);
  }
}
// True iff the stats-persist background thread exists and is running.
bool DBImpl::TEST_IsPersistentStatsEnabled() const {
  return thread_persist_stats_ && thread_persist_stats_->IsRunning();
}
// Forwards to the private estimator for the in-memory stats history size.
size_t DBImpl::TEST_EstimateInMemoryStatsHistorySize() const {
  return EstimateInMemoryStatsHistorySize();
}
  251. } // namespace ROCKSDB_NAMESPACE
  252. #endif // NDEBUG