// db_impl.h
  1. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
  2. // This source code is licensed under both the GPLv2 (found in the
  3. // COPYING file in the root directory) and Apache 2.0 License
  4. // (found in the LICENSE.Apache file in the root directory).
  5. //
  6. // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
  7. // Use of this source code is governed by a BSD-style license that can be
  8. // found in the LICENSE file. See the AUTHORS file for names of contributors.
  9. #pragma once
  10. #include <atomic>
  11. #include <deque>
  12. #include <functional>
  13. #include <limits>
  14. #include <list>
  15. #include <map>
  16. #include <set>
  17. #include <string>
  18. #include <utility>
  19. #include <vector>
  20. #include "db/column_family.h"
  21. #include "db/compaction/compaction_job.h"
  22. #include "db/dbformat.h"
  23. #include "db/error_handler.h"
  24. #include "db/event_helpers.h"
  25. #include "db/external_sst_file_ingestion_job.h"
  26. #include "db/flush_job.h"
  27. #include "db/flush_scheduler.h"
  28. #include "db/import_column_family_job.h"
  29. #include "db/internal_stats.h"
  30. #include "db/log_writer.h"
  31. #include "db/logs_with_prep_tracker.h"
  32. #include "db/memtable_list.h"
  33. #include "db/pre_release_callback.h"
  34. #include "db/range_del_aggregator.h"
  35. #include "db/read_callback.h"
  36. #include "db/snapshot_checker.h"
  37. #include "db/snapshot_impl.h"
  38. #include "db/trim_history_scheduler.h"
  39. #include "db/version_edit.h"
  40. #include "db/wal_manager.h"
  41. #include "db/write_controller.h"
  42. #include "db/write_thread.h"
  43. #include "logging/event_logger.h"
  44. #include "monitoring/instrumented_mutex.h"
  45. #include "options/db_options.h"
  46. #include "port/port.h"
  47. #include "rocksdb/db.h"
  48. #include "rocksdb/env.h"
  49. #include "rocksdb/memtablerep.h"
  50. #include "rocksdb/status.h"
  51. #include "rocksdb/trace_reader_writer.h"
  52. #include "rocksdb/transaction_log.h"
  53. #include "rocksdb/write_buffer_manager.h"
  54. #include "table/scoped_arena_iterator.h"
  55. #include "trace_replay/block_cache_tracer.h"
  56. #include "trace_replay/trace_replay.h"
  57. #include "util/autovector.h"
  58. #include "util/hash.h"
  59. #include "util/repeatable_thread.h"
  60. #include "util/stop_watch.h"
  61. #include "util/thread_local.h"
  62. namespace ROCKSDB_NAMESPACE {
  63. class Arena;
  64. class ArenaWrappedDBIter;
  65. class InMemoryStatsHistoryIterator;
  66. class MemTable;
  67. class PersistentStatsHistoryIterator;
  68. class TableCache;
  69. class TaskLimiterToken;
  70. class Version;
  71. class VersionEdit;
  72. class VersionSet;
  73. class WriteCallback;
  74. struct JobContext;
  75. struct ExternalSstFileInfo;
  76. struct MemTableInfo;
  77. // Class to maintain directories for all database paths other than main one.
  78. class Directories {
  79. public:
  80. Status SetDirectories(Env* env, const std::string& dbname,
  81. const std::string& wal_dir,
  82. const std::vector<DbPath>& data_paths);
  83. Directory* GetDataDir(size_t path_id) const {
  84. assert(path_id < data_dirs_.size());
  85. Directory* ret_dir = data_dirs_[path_id].get();
  86. if (ret_dir == nullptr) {
  87. // Should use db_dir_
  88. return db_dir_.get();
  89. }
  90. return ret_dir;
  91. }
  92. Directory* GetWalDir() {
  93. if (wal_dir_) {
  94. return wal_dir_.get();
  95. }
  96. return db_dir_.get();
  97. }
  98. Directory* GetDbDir() { return db_dir_.get(); }
  99. private:
  100. std::unique_ptr<Directory> db_dir_;
  101. std::vector<std::unique_ptr<Directory>> data_dirs_;
  102. std::unique_ptr<Directory> wal_dir_;
  103. };
  104. // While DB is the public interface of RocksDB, and DBImpl is the actual
  105. // class implementing it. It's the entrance of the core RocksDB engine.
  106. // All other DB implementations, e.g. TransactionDB, BlobDB, etc, wrap a
  107. // DBImpl internally.
  108. // Other than functions implementing the DB interface, some public
  109. // functions are there for other internal components to call. For
  110. // example, TransactionDB directly calls DBImpl::WriteImpl() and
  111. // BlobDB directly calls DBImpl::GetImpl(). Some other functions
  112. // are for sub-components to call. For example, ColumnFamilyHandleImpl
  113. // calls DBImpl::FindObsoleteFiles().
  114. //
  115. // Since it's a very large class, the definition of the functions is
  116. // divided in several db_impl_*.cc files, besides db_impl.cc.
  117. class DBImpl : public DB {
  118. public:
  119. DBImpl(const DBOptions& options, const std::string& dbname,
  120. const bool seq_per_batch = false, const bool batch_per_txn = true);
  121. // No copying allowed
  122. DBImpl(const DBImpl&) = delete;
  123. void operator=(const DBImpl&) = delete;
  124. virtual ~DBImpl();
  125. // ---- Implementations of the DB interface ----
  126. using DB::Resume;
  127. virtual Status Resume() override;
  128. using DB::Put;
  129. virtual Status Put(const WriteOptions& options,
  130. ColumnFamilyHandle* column_family, const Slice& key,
  131. const Slice& value) override;
  132. using DB::Merge;
  133. virtual Status Merge(const WriteOptions& options,
  134. ColumnFamilyHandle* column_family, const Slice& key,
  135. const Slice& value) override;
  136. using DB::Delete;
  137. virtual Status Delete(const WriteOptions& options,
  138. ColumnFamilyHandle* column_family,
  139. const Slice& key) override;
  140. using DB::SingleDelete;
  141. virtual Status SingleDelete(const WriteOptions& options,
  142. ColumnFamilyHandle* column_family,
  143. const Slice& key) override;
  144. using DB::Write;
  145. virtual Status Write(const WriteOptions& options,
  146. WriteBatch* updates) override;
  147. using DB::Get;
  148. virtual Status Get(const ReadOptions& options,
  149. ColumnFamilyHandle* column_family, const Slice& key,
  150. PinnableSlice* value) override;
  151. using DB::GetMergeOperands;
  152. Status GetMergeOperands(const ReadOptions& options,
  153. ColumnFamilyHandle* column_family, const Slice& key,
  154. PinnableSlice* merge_operands,
  155. GetMergeOperandsOptions* get_merge_operands_options,
  156. int* number_of_operands) override {
  157. GetImplOptions get_impl_options;
  158. get_impl_options.column_family = column_family;
  159. get_impl_options.merge_operands = merge_operands;
  160. get_impl_options.get_merge_operands_options = get_merge_operands_options;
  161. get_impl_options.number_of_operands = number_of_operands;
  162. get_impl_options.get_value = false;
  163. return GetImpl(options, key, get_impl_options);
  164. }
  165. using DB::MultiGet;
  166. virtual std::vector<Status> MultiGet(
  167. const ReadOptions& options,
  168. const std::vector<ColumnFamilyHandle*>& column_family,
  169. const std::vector<Slice>& keys,
  170. std::vector<std::string>* values) override;
  171. // This MultiGet is a batched version, which may be faster than calling Get
  172. // multiple times, especially if the keys have some spatial locality that
  173. // enables them to be queried in the same SST files/set of files. The larger
  174. // the batch size, the more scope for batching and performance improvement
  175. // The values and statuses parameters are arrays with number of elements
  176. // equal to keys.size(). This allows the storage for those to be allocated
  177. // by the caller on the stack for small batches
  178. virtual void MultiGet(const ReadOptions& options,
  179. ColumnFamilyHandle* column_family,
  180. const size_t num_keys, const Slice* keys,
  181. PinnableSlice* values, Status* statuses,
  182. const bool sorted_input = false) override;
  183. virtual void MultiGet(const ReadOptions& options, const size_t num_keys,
  184. ColumnFamilyHandle** column_families, const Slice* keys,
  185. PinnableSlice* values, Status* statuses,
  186. const bool sorted_input = false) override;
  187. virtual void MultiGetWithCallback(
  188. const ReadOptions& options, ColumnFamilyHandle* column_family,
  189. ReadCallback* callback,
  190. autovector<KeyContext*, MultiGetContext::MAX_BATCH_SIZE>* sorted_keys);
  191. virtual Status CreateColumnFamily(const ColumnFamilyOptions& cf_options,
  192. const std::string& column_family,
  193. ColumnFamilyHandle** handle) override;
  194. virtual Status CreateColumnFamilies(
  195. const ColumnFamilyOptions& cf_options,
  196. const std::vector<std::string>& column_family_names,
  197. std::vector<ColumnFamilyHandle*>* handles) override;
  198. virtual Status CreateColumnFamilies(
  199. const std::vector<ColumnFamilyDescriptor>& column_families,
  200. std::vector<ColumnFamilyHandle*>* handles) override;
  201. virtual Status DropColumnFamily(ColumnFamilyHandle* column_family) override;
  202. virtual Status DropColumnFamilies(
  203. const std::vector<ColumnFamilyHandle*>& column_families) override;
  204. // Returns false if key doesn't exist in the database and true if it may.
  205. // If value_found is not passed in as null, then return the value if found in
  206. // memory. On return, if value was found, then value_found will be set to true
  207. // , otherwise false.
  208. using DB::KeyMayExist;
  209. virtual bool KeyMayExist(const ReadOptions& options,
  210. ColumnFamilyHandle* column_family, const Slice& key,
  211. std::string* value,
  212. bool* value_found = nullptr) override;
  213. using DB::NewIterator;
  214. virtual Iterator* NewIterator(const ReadOptions& options,
  215. ColumnFamilyHandle* column_family) override;
  216. virtual Status NewIterators(
  217. const ReadOptions& options,
  218. const std::vector<ColumnFamilyHandle*>& column_families,
  219. std::vector<Iterator*>* iterators) override;
  220. virtual const Snapshot* GetSnapshot() override;
  221. virtual void ReleaseSnapshot(const Snapshot* snapshot) override;
  222. using DB::GetProperty;
  223. virtual bool GetProperty(ColumnFamilyHandle* column_family,
  224. const Slice& property, std::string* value) override;
  225. using DB::GetMapProperty;
  226. virtual bool GetMapProperty(
  227. ColumnFamilyHandle* column_family, const Slice& property,
  228. std::map<std::string, std::string>* value) override;
  229. using DB::GetIntProperty;
  230. virtual bool GetIntProperty(ColumnFamilyHandle* column_family,
  231. const Slice& property, uint64_t* value) override;
  232. using DB::GetAggregatedIntProperty;
  233. virtual bool GetAggregatedIntProperty(const Slice& property,
  234. uint64_t* aggregated_value) override;
  235. using DB::GetApproximateSizes;
  236. virtual Status GetApproximateSizes(const SizeApproximationOptions& options,
  237. ColumnFamilyHandle* column_family,
  238. const Range* range, int n,
  239. uint64_t* sizes) override;
  240. using DB::GetApproximateMemTableStats;
  241. virtual void GetApproximateMemTableStats(ColumnFamilyHandle* column_family,
  242. const Range& range,
  243. uint64_t* const count,
  244. uint64_t* const size) override;
  245. using DB::CompactRange;
  246. virtual Status CompactRange(const CompactRangeOptions& options,
  247. ColumnFamilyHandle* column_family,
  248. const Slice* begin, const Slice* end) override;
  249. using DB::CompactFiles;
  250. virtual Status CompactFiles(
  251. const CompactionOptions& compact_options,
  252. ColumnFamilyHandle* column_family,
  253. const std::vector<std::string>& input_file_names, const int output_level,
  254. const int output_path_id = -1,
  255. std::vector<std::string>* const output_file_names = nullptr,
  256. CompactionJobInfo* compaction_job_info = nullptr) override;
  257. virtual Status PauseBackgroundWork() override;
  258. virtual Status ContinueBackgroundWork() override;
  259. virtual Status EnableAutoCompaction(
  260. const std::vector<ColumnFamilyHandle*>& column_family_handles) override;
  261. virtual void EnableManualCompaction() override;
  262. virtual void DisableManualCompaction() override;
  263. using DB::SetOptions;
  264. Status SetOptions(
  265. ColumnFamilyHandle* column_family,
  266. const std::unordered_map<std::string, std::string>& options_map) override;
  267. virtual Status SetDBOptions(
  268. const std::unordered_map<std::string, std::string>& options_map) override;
  269. using DB::NumberLevels;
  270. virtual int NumberLevels(ColumnFamilyHandle* column_family) override;
  271. using DB::MaxMemCompactionLevel;
  272. virtual int MaxMemCompactionLevel(ColumnFamilyHandle* column_family) override;
  273. using DB::Level0StopWriteTrigger;
  274. virtual int Level0StopWriteTrigger(
  275. ColumnFamilyHandle* column_family) override;
  276. virtual const std::string& GetName() const override;
  277. virtual Env* GetEnv() const override;
  278. virtual FileSystem* GetFileSystem() const override;
  279. using DB::GetOptions;
  280. virtual Options GetOptions(ColumnFamilyHandle* column_family) const override;
  281. using DB::GetDBOptions;
  282. virtual DBOptions GetDBOptions() const override;
  283. using DB::Flush;
  284. virtual Status Flush(const FlushOptions& options,
  285. ColumnFamilyHandle* column_family) override;
  286. virtual Status Flush(
  287. const FlushOptions& options,
  288. const std::vector<ColumnFamilyHandle*>& column_families) override;
  289. virtual Status FlushWAL(bool sync) override;
  290. bool TEST_WALBufferIsEmpty(bool lock = true);
  291. virtual Status SyncWAL() override;
  292. virtual Status LockWAL() override;
  293. virtual Status UnlockWAL() override;
  294. virtual SequenceNumber GetLatestSequenceNumber() const override;
  295. virtual bool SetPreserveDeletesSequenceNumber(SequenceNumber seqnum) override;
  296. virtual Status GetDbIdentity(std::string& identity) const override;
  297. virtual Status GetDbIdentityFromIdentityFile(std::string* identity) const;
  298. ColumnFamilyHandle* DefaultColumnFamily() const override;
  299. ColumnFamilyHandle* PersistentStatsColumnFamily() const;
  300. virtual Status Close() override;
  301. Status GetStatsHistory(
  302. uint64_t start_time, uint64_t end_time,
  303. std::unique_ptr<StatsHistoryIterator>* stats_iterator) override;
  304. #ifndef ROCKSDB_LITE
  305. using DB::ResetStats;
  306. virtual Status ResetStats() override;
  307. virtual Status DisableFileDeletions() override;
  308. virtual Status EnableFileDeletions(bool force) override;
  309. virtual int IsFileDeletionsEnabled() const;
  310. // All the returned filenames start with "/"
  311. virtual Status GetLiveFiles(std::vector<std::string>&,
  312. uint64_t* manifest_file_size,
  313. bool flush_memtable = true) override;
  314. virtual Status GetSortedWalFiles(VectorLogPtr& files) override;
  315. virtual Status GetCurrentWalFile(
  316. std::unique_ptr<LogFile>* current_log_file) override;
  317. virtual Status GetCreationTimeOfOldestFile(
  318. uint64_t* creation_time) override;
  319. virtual Status GetUpdatesSince(
  320. SequenceNumber seq_number, std::unique_ptr<TransactionLogIterator>* iter,
  321. const TransactionLogIterator::ReadOptions& read_options =
  322. TransactionLogIterator::ReadOptions()) override;
  323. virtual Status DeleteFile(std::string name) override;
  324. Status DeleteFilesInRanges(ColumnFamilyHandle* column_family,
  325. const RangePtr* ranges, size_t n,
  326. bool include_end = true);
  327. virtual void GetLiveFilesMetaData(
  328. std::vector<LiveFileMetaData>* metadata) override;
  329. // Obtains the meta data of the specified column family of the DB.
  330. // Status::NotFound() will be returned if the current DB does not have
  331. // any column family match the specified name.
  332. // TODO(yhchiang): output parameter is placed in the end in this codebase.
  333. virtual void GetColumnFamilyMetaData(ColumnFamilyHandle* column_family,
  334. ColumnFamilyMetaData* metadata) override;
  335. Status SuggestCompactRange(ColumnFamilyHandle* column_family,
  336. const Slice* begin, const Slice* end) override;
  337. Status PromoteL0(ColumnFamilyHandle* column_family,
  338. int target_level) override;
  339. using DB::IngestExternalFile;
  340. virtual Status IngestExternalFile(
  341. ColumnFamilyHandle* column_family,
  342. const std::vector<std::string>& external_files,
  343. const IngestExternalFileOptions& ingestion_options) override;
  344. using DB::IngestExternalFiles;
  345. virtual Status IngestExternalFiles(
  346. const std::vector<IngestExternalFileArg>& args) override;
  347. using DB::CreateColumnFamilyWithImport;
  348. virtual Status CreateColumnFamilyWithImport(
  349. const ColumnFamilyOptions& options, const std::string& column_family_name,
  350. const ImportColumnFamilyOptions& import_options,
  351. const ExportImportFilesMetaData& metadata,
  352. ColumnFamilyHandle** handle) override;
  353. using DB::VerifyChecksum;
  354. virtual Status VerifyChecksum(const ReadOptions& /*read_options*/) override;
  355. using DB::StartTrace;
  356. virtual Status StartTrace(
  357. const TraceOptions& options,
  358. std::unique_ptr<TraceWriter>&& trace_writer) override;
  359. using DB::EndTrace;
  360. virtual Status EndTrace() override;
  361. using DB::StartBlockCacheTrace;
  362. Status StartBlockCacheTrace(
  363. const TraceOptions& options,
  364. std::unique_ptr<TraceWriter>&& trace_writer) override;
  365. using DB::EndBlockCacheTrace;
  366. Status EndBlockCacheTrace() override;
  367. using DB::GetPropertiesOfAllTables;
  368. virtual Status GetPropertiesOfAllTables(
  369. ColumnFamilyHandle* column_family,
  370. TablePropertiesCollection* props) override;
  371. virtual Status GetPropertiesOfTablesInRange(
  372. ColumnFamilyHandle* column_family, const Range* range, std::size_t n,
  373. TablePropertiesCollection* props) override;
  374. #endif // ROCKSDB_LITE
  375. // ---- End of implementations of the DB interface ----
  376. struct GetImplOptions {
  377. ColumnFamilyHandle* column_family = nullptr;
  378. PinnableSlice* value = nullptr;
  379. bool* value_found = nullptr;
  380. ReadCallback* callback = nullptr;
  381. bool* is_blob_index = nullptr;
  382. // If true return value associated with key via value pointer else return
  383. // all merge operands for key via merge_operands pointer
  384. bool get_value = true;
  385. // Pointer to an array of size
  386. // get_merge_operands_options.expected_max_number_of_operands allocated by
  387. // user
  388. PinnableSlice* merge_operands = nullptr;
  389. GetMergeOperandsOptions* get_merge_operands_options = nullptr;
  390. int* number_of_operands = nullptr;
  391. };
  392. // Function that Get and KeyMayExist call with no_io true or false
  393. // Note: 'value_found' from KeyMayExist propagates here
  394. // This function is also called by GetMergeOperands
  395. // If get_impl_options.get_value = true get value associated with
  396. // get_impl_options.key via get_impl_options.value
  397. // If get_impl_options.get_value = false get merge operands associated with
  398. // get_impl_options.key via get_impl_options.merge_operands
  399. Status GetImpl(const ReadOptions& options, const Slice& key,
  400. GetImplOptions get_impl_options);
  401. ArenaWrappedDBIter* NewIteratorImpl(const ReadOptions& options,
  402. ColumnFamilyData* cfd,
  403. SequenceNumber snapshot,
  404. ReadCallback* read_callback,
  405. bool allow_blob = false,
  406. bool allow_refresh = true);
  407. virtual SequenceNumber GetLastPublishedSequence() const {
  408. if (last_seq_same_as_publish_seq_) {
  409. return versions_->LastSequence();
  410. } else {
  411. return versions_->LastPublishedSequence();
  412. }
  413. }
  414. // REQUIRES: joined the main write queue if two_write_queues is disabled, and
  415. // the second write queue otherwise.
  416. virtual void SetLastPublishedSequence(SequenceNumber seq);
  417. // Returns LastSequence in last_seq_same_as_publish_seq_
  418. // mode and LastAllocatedSequence otherwise. This is useful when visibility
  419. // depends also on data written to the WAL but not to the memtable.
  420. SequenceNumber TEST_GetLastVisibleSequence() const;
  421. #ifndef ROCKSDB_LITE
  422. // Similar to Write() but will call the callback once on the single write
  423. // thread to determine whether it is safe to perform the write.
  424. virtual Status WriteWithCallback(const WriteOptions& write_options,
  425. WriteBatch* my_batch,
  426. WriteCallback* callback);
  427. // Returns the sequence number that is guaranteed to be smaller than or equal
  428. // to the sequence number of any key that could be inserted into the current
  429. // memtables. It can then be assumed that any write with a larger(or equal)
  430. // sequence number will be present in this memtable or a later memtable.
  431. //
  432. // If the earliest sequence number could not be determined,
  433. // kMaxSequenceNumber will be returned.
  434. //
  435. // If include_history=true, will also search Memtables in MemTableList
  436. // History.
  437. SequenceNumber GetEarliestMemTableSequenceNumber(SuperVersion* sv,
  438. bool include_history);
  439. // For a given key, check to see if there are any records for this key
  440. // in the memtables, including memtable history. If cache_only is false,
  441. // SST files will also be checked.
  442. //
  443. // If a key is found, *found_record_for_key will be set to true and
  444. // *seq will be set to the stored sequence number for the latest
  445. // operation on this key or kMaxSequenceNumber if unknown.
  446. // If no key is found, *found_record_for_key will be set to false.
  447. //
  448. // Note: If cache_only=false, it is possible for *seq to be set to 0 if
  449. // the sequence number has been cleared from the record. If the caller is
  450. // holding an active db snapshot, we know the missing sequence must be less
  451. // than the snapshot's sequence number (sequence numbers are only cleared
  452. // when there are no earlier active snapshots).
  453. //
  454. // If NotFound is returned and found_record_for_key is set to false, then no
  455. // record for this key was found. If the caller is holding an active db
  456. // snapshot, we know that no key could have existing after this snapshot
  457. // (since we do not compact keys that have an earlier snapshot).
  458. //
  459. // Only records newer than or at `lower_bound_seq` are guaranteed to be
  460. // returned. Memtables and files may not be checked if it only contains data
  461. // older than `lower_bound_seq`.
  462. //
  463. // Returns OK or NotFound on success,
  464. // other status on unexpected error.
  // Find the earliest sequence number >= lower_bound_seq at which `key` was
  // written, if such a record is still visible in `sv`.
  // TODO(andrewkr): this API needs to be aware of range deletion operations
  Status GetLatestSequenceForKey(SuperVersion* sv, const Slice& key,
                                 bool cache_only,
                                 SequenceNumber lower_bound_seq,
                                 SequenceNumber* seq,
                                 bool* found_record_for_key,
                                 bool* is_blob_index = nullptr);

  // Record iterator Seek / SeekForPrev calls in the trace (when tracing is
  // enabled) so they can be replayed later.
  Status TraceIteratorSeek(const uint32_t& cf_id, const Slice& key);
  Status TraceIteratorSeekForPrev(const uint32_t& cf_id, const Slice& key);
#endif  // ROCKSDB_LITE

  // Similar to GetSnapshot(), but also lets the db know that this snapshot
  // will be used for transaction write-conflict checking. The DB can then
  // make sure not to compact any keys that would prevent a write-conflict from
  // being detected.
  const Snapshot* GetSnapshotForWriteConflictBoundary();

  // checks if all live files exist on file system and that their file sizes
  // match to our in-memory records
  virtual Status CheckConsistency();

  // max_file_num_to_ignore allows bottom level compaction to filter out newly
  // compacted SST files. Setting max_file_num_to_ignore to kMaxUint64 will
  // disable the filtering
  Status RunManualCompaction(ColumnFamilyData* cfd, int input_level,
                             int output_level,
                             const CompactRangeOptions& compact_range_options,
                             const Slice* begin, const Slice* end,
                             bool exclusive, bool disallow_trivial_move,
                             uint64_t max_file_num_to_ignore);

  // Return an internal iterator over the current state of the database.
  // The keys of this iterator are internal keys (see format.h).
  // The returned iterator should be deleted when no longer needed.
  InternalIterator* NewInternalIterator(
      Arena* arena, RangeDelAggregator* range_del_agg, SequenceNumber sequence,
      ColumnFamilyHandle* column_family = nullptr);

  LogsWithPrepTracker* logs_with_prep_tracker() {
    return &logs_with_prep_tracker_;
  }

  // Limits on how many flushes and compactions may be scheduled concurrently.
  struct BGJobLimits {
    int max_flushes;
    int max_compactions;
  };
  // Returns maximum background flushes and compactions allowed to be scheduled
  BGJobLimits GetBGJobLimits() const;
  // Need a static version that can be called during SanitizeOptions().
  static BGJobLimits GetBGJobLimits(int max_background_flushes,
                                    int max_background_compactions,
                                    int max_background_jobs,
                                    bool parallelize_compactions);

  // move logs pending closing from job_context to the DB queue and
  // schedule a purge
  void ScheduleBgLogWriterClose(JobContext* job_context);

  uint64_t MinLogNumberToKeep();

  // Returns the lower bound file number for SSTs that won't be deleted, even if
  // they're obsolete. This lower bound is used internally to prevent newly
  // created flush/compaction output files from being deleted before they're
  // installed. This technique avoids the need for tracking the exact numbers of
  // files pending creation, although it prevents more files than necessary from
  // being deleted.
  uint64_t MinObsoleteSstNumberToKeep();

  // Returns the list of live files in 'live' and the list
  // of all files in the filesystem in 'candidate_files'.
  // If force == false and the last call was less than
  // db_options_.delete_obsolete_files_period_micros microseconds ago,
  // it will not fill up the job_context
  void FindObsoleteFiles(JobContext* job_context, bool force,
                         bool no_full_scan = false);
  530. // Diffs the files listed in filenames and those that do not
  531. // belong to live files are possibly removed. Also, removes all the
  532. // files in sst_delete_files and log_delete_files.
  533. // It is not necessary to hold the mutex when invoking this method.
  534. // If FindObsoleteFiles() was run, we need to also run
  535. // PurgeObsoleteFiles(), even if disable_delete_obsolete_files_ is true
  536. void PurgeObsoleteFiles(JobContext& background_contet,
  537. bool schedule_only = false);
  538. // Schedule a background job to actually delete obsolete files.
  539. void SchedulePurge();
  const SnapshotList& snapshots() const { return snapshots_; }

  // load list of snapshots to `snap_vector` that is no newer than `max_seq`
  // in ascending order.
  // `oldest_write_conflict_snapshot` is filled with the oldest snapshot
  // which satisfies SnapshotImpl.is_write_conflict_boundary_ = true.
  // Takes the DB mutex for the duration of the copy.
  void LoadSnapshots(std::vector<SequenceNumber>* snap_vector,
                     SequenceNumber* oldest_write_conflict_snapshot,
                     const SequenceNumber& max_seq) const {
    InstrumentedMutexLock l(mutex());
    snapshots().GetAll(snap_vector, oldest_write_conflict_snapshot, max_seq);
  }

  const ImmutableDBOptions& immutable_db_options() const {
    return immutable_db_options_;
  }

  // Cancel all background jobs, including flush, compaction, background
  // purging, stats dumping threads, etc. If `wait` = true, wait for the
  // running jobs to abort or finish before returning. Otherwise, only
  // sends the signals.
  void CancelAllBackgroundWork(bool wait);
  559. // Find Super version and reference it. Based on options, it might return
  560. // the thread local cached one.
  561. // Call ReturnAndCleanupSuperVersion() when it is no longer needed.
  562. SuperVersion* GetAndRefSuperVersion(ColumnFamilyData* cfd);
  563. // Similar to the previous function but looks up based on a column family id.
  564. // nullptr will be returned if this column family no longer exists.
  565. // REQUIRED: this function should only be called on the write thread or if the
  566. // mutex is held.
  567. SuperVersion* GetAndRefSuperVersion(uint32_t column_family_id);
  568. // Un-reference the super version and clean it up if it is the last reference.
  569. void CleanupSuperVersion(SuperVersion* sv);
  570. // Un-reference the super version and return it to thread local cache if
  571. // needed. If it is the last reference of the super version. Clean it up
  572. // after un-referencing it.
  573. void ReturnAndCleanupSuperVersion(ColumnFamilyData* cfd, SuperVersion* sv);
  574. // Similar to the previous function but looks up based on a column family id.
  575. // nullptr will be returned if this column family no longer exists.
  576. // REQUIRED: this function should only be called on the write thread.
  577. void ReturnAndCleanupSuperVersion(uint32_t colun_family_id, SuperVersion* sv);
  // REQUIRED: this function should only be called on the write thread or if the
  // mutex is held. Return value only valid until next call to this function or
  // mutex is released.
  ColumnFamilyHandle* GetColumnFamilyHandle(uint32_t column_family_id);

  // Same as above; should be called without the mutex held and not on the
  // write thread.
  std::unique_ptr<ColumnFamilyHandle> GetColumnFamilyHandleUnlocked(
      uint32_t column_family_id);

  // Returns the number of currently running flushes.
  // REQUIREMENT: mutex_ must be held when calling this function.
  int num_running_flushes() {
    mutex_.AssertHeld();
    return num_running_flushes_;
  }

  // Returns the number of currently running compactions.
  // REQUIREMENT: mutex_ must be held when calling this function.
  int num_running_compactions() {
    mutex_.AssertHeld();
    return num_running_compactions_;
  }

  const WriteController& write_controller() { return write_controller_; }

  InternalIterator* NewInternalIterator(
      const ReadOptions&, ColumnFamilyData* cfd, SuperVersion* super_version,
      Arena* arena, RangeDelAggregator* range_del_agg, SequenceNumber sequence);
  // hollow transactions shell used for recovery.
  // these will then be passed to TransactionDB so that
  // locks can be reacquired before writing can resume.
  struct RecoveredTransaction {
    std::string name_;
    // True while the last batch seen for this txn was an unprepared batch.
    bool unprepared_;

    struct BatchInfo {
      uint64_t log_number_;
      // TODO(lth): For unprepared, the memory usage here can be big for
      // unprepared transactions. This is only useful for rollbacks, and we
      // can in theory just keep keyset for that.
      WriteBatch* batch_;  // owned; deleted in ~RecoveredTransaction
      // Number of sub-batches. A new sub-batch is created if txn attempts to
      // insert a duplicate key,seq to memtable. This is currently used in
      // WritePreparedTxn/WriteUnpreparedTxn.
      size_t batch_cnt_;
    };

    // This maps the seq of the first key in the batch to BatchInfo, which
    // contains WriteBatch and other information relevant to the batch.
    //
    // For WriteUnprepared, batches_ can have size greater than 1, but for
    // other write policies, it must be of size 1.
    std::map<SequenceNumber, BatchInfo> batches_;

    explicit RecoveredTransaction(const uint64_t log, const std::string& name,
                                  WriteBatch* batch, SequenceNumber seq,
                                  size_t batch_cnt, bool unprepared)
        : name_(name), unprepared_(unprepared) {
      batches_[seq] = {log, batch, batch_cnt};
    }

    ~RecoveredTransaction() {
      // Owns every WriteBatch that was registered via the ctor or AddBatch().
      for (auto& it : batches_) {
        delete it.second.batch_;
      }
    }

    void AddBatch(SequenceNumber seq, uint64_t log_number, WriteBatch* batch,
                  size_t batch_cnt, bool unprepared) {
      assert(batches_.count(seq) == 0);
      batches_[seq] = {log_number, batch, batch_cnt};
      // Prior state must be unprepared, since the prepare batch must be the
      // last batch.
      assert(unprepared_);
      unprepared_ = unprepared;
    }
  };

  bool allow_2pc() const { return immutable_db_options_.allow_2pc; }

  // NOTE(review): returns the map by value, so every call copies the whole
  // unordered_map of pointers. Consider returning a const reference if no
  // caller depends on receiving a copy — verify call sites first.
  std::unordered_map<std::string, RecoveredTransaction*>
  recovered_transactions() {
    return recovered_transactions_;
  }
  650. RecoveredTransaction* GetRecoveredTransaction(const std::string& name) {
  651. auto it = recovered_transactions_.find(name);
  652. if (it == recovered_transactions_.end()) {
  653. return nullptr;
  654. } else {
  655. return it->second;
  656. }
  657. }
  658. void InsertRecoveredTransaction(const uint64_t log, const std::string& name,
  659. WriteBatch* batch, SequenceNumber seq,
  660. size_t batch_cnt, bool unprepared_batch) {
  661. // For WriteUnpreparedTxn, InsertRecoveredTransaction is called multiple
  662. // times for every unprepared batch encountered during recovery.
  663. //
  664. // If the transaction is prepared, then the last call to
  665. // InsertRecoveredTransaction will have unprepared_batch = false.
  666. auto rtxn = recovered_transactions_.find(name);
  667. if (rtxn == recovered_transactions_.end()) {
  668. recovered_transactions_[name] = new RecoveredTransaction(
  669. log, name, batch, seq, batch_cnt, unprepared_batch);
  670. } else {
  671. rtxn->second->AddBatch(seq, log, batch, batch_cnt, unprepared_batch);
  672. }
  673. logs_with_prep_tracker_.MarkLogAsContainingPrepSection(log);
  674. }
  675. void DeleteRecoveredTransaction(const std::string& name) {
  676. auto it = recovered_transactions_.find(name);
  677. assert(it != recovered_transactions_.end());
  678. auto* trx = it->second;
  679. recovered_transactions_.erase(it);
  680. for (const auto& info : trx->batches_) {
  681. logs_with_prep_tracker_.MarkLogAsHavingPrepSectionFlushed(
  682. info.second.log_number_);
  683. }
  684. delete trx;
  685. }
  686. void DeleteAllRecoveredTransactions() {
  687. for (auto it = recovered_transactions_.begin();
  688. it != recovered_transactions_.end(); ++it) {
  689. delete it->second;
  690. }
  691. recovered_transactions_.clear();
  692. }
  // Queue a WAL writer to be freed by the background purge thread.
  void AddToLogsToFreeQueue(log::Writer* log_writer) {
    logs_to_free_queue_.push_back(log_writer);
  }

  // Queue a SuperVersion to be freed by the background purge thread.
  void AddSuperVersionsToFreeQueue(SuperVersion* sv) {
    superversions_to_free_queue_.push_back(sv);
  }

  void SetSnapshotChecker(SnapshotChecker* snapshot_checker);

  // Fill JobContext with snapshot information needed by flush and compaction.
  void GetSnapshotContext(JobContext* job_context,
                          std::vector<SequenceNumber>* snapshot_seqs,
                          SequenceNumber* earliest_write_conflict_snapshot,
                          SnapshotChecker** snapshot_checker);

  // Not thread-safe.
  void SetRecoverableStatePreReleaseCallback(PreReleaseCallback* callback);

  InstrumentedMutex* mutex() const { return &mutex_; }

  // Initialize a brand new DB. The DB directory is expected to be empty before
  // calling it.
  Status NewDB();

  // This is to be used only by internal rocksdb classes.
  static Status Open(const DBOptions& db_options, const std::string& name,
                     const std::vector<ColumnFamilyDescriptor>& column_families,
                     std::vector<ColumnFamilyHandle*>* handles, DB** dbptr,
                     const bool seq_per_batch, const bool batch_per_txn);

  static Status CreateAndNewDirectory(Env* env, const std::string& dirname,
                                      std::unique_ptr<Directory>* directory);

  // find stats map from stats_history_ with smallest timestamp in
  // the range of [start_time, end_time)
  bool FindStatsByTime(uint64_t start_time, uint64_t end_time,
                       uint64_t* new_time,
                       std::map<std::string, uint64_t>* stats_map);

  // Print information of all tombstones of all iterators to the std::string
  // This is only used by ldb. The output might be capped. Tombstones
  // printed out are not guaranteed to be in any order.
  Status TablesRangeTombstoneSummary(ColumnFamilyHandle* column_family,
                                     int max_entries_to_print,
                                     std::string* out_str);
#ifndef NDEBUG
  // Test-only hooks; compiled out of release (NDEBUG) builds.

  // Compact any files in the named level that overlap [*begin, *end]
  Status TEST_CompactRange(int level, const Slice* begin, const Slice* end,
                           ColumnFamilyHandle* column_family = nullptr,
                           bool disallow_trivial_move = false);

  void TEST_SwitchWAL();

  bool TEST_UnableToReleaseOldestLog() { return unable_to_release_oldest_log_; }

  bool TEST_IsLogGettingFlushed() {
    return alive_log_files_.begin()->getting_flushed;
  }

  Status TEST_SwitchMemtable(ColumnFamilyData* cfd = nullptr);

  // Force current memtable contents to be flushed.
  Status TEST_FlushMemTable(bool wait = true, bool allow_write_stall = false,
                            ColumnFamilyHandle* cfh = nullptr);

  Status TEST_FlushMemTable(ColumnFamilyData* cfd,
                            const FlushOptions& flush_opts);

  // Flush (multiple) ColumnFamilyData without using ColumnFamilyHandle. This
  // is because in certain cases, we can flush column families, wait for the
  // flush to complete, but delete the column family handle before the wait
  // finishes. For example in CompactRange.
  Status TEST_AtomicFlushMemTables(const autovector<ColumnFamilyData*>& cfds,
                                   const FlushOptions& flush_opts);

  // Wait for memtable compaction
  Status TEST_WaitForFlushMemTable(ColumnFamilyHandle* column_family = nullptr);

  // Wait for any compaction
  // We add a bool parameter to wait for unscheduledCompactions_ == 0, but this
  // is only for the special test of CancelledCompactions
  Status TEST_WaitForCompact(bool waitUnscheduled = false);

  // Return the maximum overlapping data (in bytes) at next level for any
  // file at a level >= 1.
  int64_t TEST_MaxNextLevelOverlappingBytes(
      ColumnFamilyHandle* column_family = nullptr);

  // Return the current manifest file no.
  uint64_t TEST_Current_Manifest_FileNo();

  // Returns the number that'll be assigned to the next file that's created.
  uint64_t TEST_Current_Next_FileNo();

  // get total level0 file size. Only for testing.
  uint64_t TEST_GetLevel0TotalSize();

  void TEST_GetFilesMetaData(ColumnFamilyHandle* column_family,
                             std::vector<std::vector<FileMetaData>>* metadata);

  void TEST_LockMutex();

  void TEST_UnlockMutex();

  // REQUIRES: mutex locked
  void* TEST_BeginWrite();

  // REQUIRES: mutex locked
  // pass the pointer that you got from TEST_BeginWrite()
  void TEST_EndWrite(void* w);

  uint64_t TEST_MaxTotalInMemoryState() const {
    return max_total_in_memory_state_;
  }

  size_t TEST_LogsToFreeSize();

  uint64_t TEST_LogfileNumber();

  uint64_t TEST_total_log_size() const { return total_log_size_; }

  // Returns column family name to ImmutableCFOptions map.
  Status TEST_GetAllImmutableCFOptions(
      std::unordered_map<std::string, const ImmutableCFOptions*>* iopts_map);

  // Return the latest MutableCFOptions of a column family
  Status TEST_GetLatestMutableCFOptions(ColumnFamilyHandle* column_family,
                                        MutableCFOptions* mutable_cf_options);

  Cache* TEST_table_cache() { return table_cache_.get(); }

  WriteController& TEST_write_controler() { return write_controller_; }

  uint64_t TEST_FindMinLogContainingOutstandingPrep();
  uint64_t TEST_FindMinPrepLogReferencedByMemTable();
  size_t TEST_PreparedSectionCompletedSize();
  size_t TEST_LogsWithPrepSize();

  int TEST_BGCompactionsAllowed() const;
  int TEST_BGFlushesAllowed() const;
  size_t TEST_GetWalPreallocateBlockSize(uint64_t write_buffer_size) const;
  void TEST_WaitForDumpStatsRun(std::function<void()> callback) const;
  void TEST_WaitForPersistStatsRun(std::function<void()> callback) const;
  bool TEST_IsPersistentStatsEnabled() const;
  size_t TEST_EstimateInMemoryStatsHistorySize() const;
#endif  // NDEBUG
 protected:
  const std::string dbname_;
  std::string db_id_;
  std::unique_ptr<VersionSet> versions_;
  // Flag to check whether we allocated and own the info log file
  bool own_info_log_;
  const DBOptions initial_db_options_;
  Env* const env_;
  std::shared_ptr<FileSystem> fs_;
  const ImmutableDBOptions immutable_db_options_;
  MutableDBOptions mutable_db_options_;
  Statistics* stats_;
  // Transactions reconstructed from the WAL during recovery, keyed by name.
  std::unordered_map<std::string, RecoveredTransaction*>
      recovered_transactions_;
  std::unique_ptr<Tracer> tracer_;
  InstrumentedMutex trace_mutex_;
  BlockCacheTracer block_cache_tracer_;

  // State below is protected by mutex_
  // With two_write_queues enabled, some of the variables that accessed during
  // WriteToWAL need different synchronization: log_empty_, alive_log_files_,
  // logs_, logfile_number_. Refer to the definition of each variable below for
  // more description.
  mutable InstrumentedMutex mutex_;

  ColumnFamilyHandleImpl* default_cf_handle_;
  InternalStats* default_cf_internal_stats_;

  // only used for dynamically adjusting max_total_wal_size. it is a sum of
  // [write_buffer_size * max_write_buffer_number] over all column families
  uint64_t max_total_in_memory_state_;

  // If true, we have only one (default) column family. We use this to optimize
  // some code-paths
  bool single_column_family_mode_;

  // The options to access storage files
  const FileOptions file_options_;

  // Additional options for compaction and flush
  FileOptions file_options_for_compaction_;

  std::unique_ptr<ColumnFamilyMemTablesImpl> column_family_memtables_;

  // Increase the sequence number after writing each batch, whether memtable is
  // disabled for that or not. Otherwise the sequence number is increased after
  // writing each key into memtable. This implies that when disable_memtable is
  // set, the seq is not increased at all.
  //
  // Default: false
  const bool seq_per_batch_;

  // This determines during recovery whether we expect one writebatch per
  // recovered transaction, or potentially multiple writebatches per
  // transaction. For WriteUnprepared, this is set to false, since multiple
  // batches can exist per transaction.
  //
  // Default: true
  const bool batch_per_txn_;
  // Persist options to the options file.
  // If need_mutex_lock = true, the method will lock the DB mutex; otherwise
  // the caller must already hold it. If need_enter_write_thread = true, the
  // method will enter the write thread.
  // NOTE(review): the original comment said "= false" for both flags, which
  // contradicts the parameter names — confirm against the implementation in
  // db_impl.cc before relying on this.
  Status WriteOptionsFile(bool need_mutex_lock, bool need_enter_write_thread);

  // The following two functions can only be called when:
  // 1. WriteThread::Writer::EnterUnbatched() is used.
  // 2. db_mutex is NOT held
  Status RenameTempFileToOptionsFile(const std::string& file_name);
  Status DeleteObsoleteOptionsFiles();

  void NotifyOnFlushBegin(ColumnFamilyData* cfd, FileMetaData* file_meta,
                          const MutableCFOptions& mutable_cf_options,
                          int job_id);

  void NotifyOnFlushCompleted(
      ColumnFamilyData* cfd, const MutableCFOptions& mutable_cf_options,
      std::list<std::unique_ptr<FlushJobInfo>>* flush_jobs_info);

  void NotifyOnCompactionBegin(ColumnFamilyData* cfd, Compaction* c,
                               const Status& st,
                               const CompactionJobStats& job_stats, int job_id);

  void NotifyOnCompactionCompleted(ColumnFamilyData* cfd, Compaction* c,
                                   const Status& st,
                                   const CompactionJobStats& job_stats,
                                   int job_id);

  void NotifyOnMemTableSealed(ColumnFamilyData* cfd,
                              const MemTableInfo& mem_table_info);

#ifndef ROCKSDB_LITE
  void NotifyOnExternalFileIngested(
      ColumnFamilyData* cfd, const ExternalSstFileIngestionJob& ingestion_job);
#endif  // !ROCKSDB_LITE

  void NewThreadStatusCfInfo(ColumnFamilyData* cfd) const;

  void EraseThreadStatusCfInfo(ColumnFamilyData* cfd) const;

  void EraseThreadStatusDbInfo() const;

  // If disable_memtable is set the application logic must guarantee that the
  // batch will still be skipped from memtable during the recovery. An exception
  // to this is seq_per_batch_ mode, in which since each batch already takes one
  // seq, it is ok for the batch to write to memtable during recovery as long as
  // it only takes one sequence number: i.e., no duplicate keys.
  // In WriteCommitted it is guaranteed since disable_memtable is used for
  // prepare batch which will be written to memtable later during the commit,
  // and in WritePrepared it is guaranteed since it will be used only for WAL
  // markers which will never be written to memtable. If the commit marker is
  // accompanied with CommitTimeWriteBatch that is not written to memtable as
  // long as it has no duplicate keys, it does not violate the one-seq-per-batch
  // policy.
  // batch_cnt is expected to be non-zero in seq_per_batch mode and
  // indicates the number of sub-patches. A sub-patch is a subset of the write
  // batch that does not have duplicate keys.
  Status WriteImpl(const WriteOptions& options, WriteBatch* updates,
                   WriteCallback* callback = nullptr,
                   uint64_t* log_used = nullptr, uint64_t log_ref = 0,
                   bool disable_memtable = false, uint64_t* seq_used = nullptr,
                   size_t batch_cnt = 0,
                   PreReleaseCallback* pre_release_callback = nullptr);
  // Variant of WriteImpl used when pipelined writes are enabled: WAL write and
  // memtable write are performed by different writer threads.
  Status PipelinedWriteImpl(const WriteOptions& options, WriteBatch* updates,
                            WriteCallback* callback = nullptr,
                            uint64_t* log_used = nullptr, uint64_t log_ref = 0,
                            bool disable_memtable = false,
                            uint64_t* seq_used = nullptr);

  // Write only to memtables without joining any write queue
  Status UnorderedWriteMemtable(const WriteOptions& write_options,
                                WriteBatch* my_batch, WriteCallback* callback,
                                uint64_t log_ref, SequenceNumber seq,
                                const size_t sub_batch_cnt);

  // Whether the batch requires to be assigned with an order
  enum AssignOrder : bool { kDontAssignOrder, kDoAssignOrder };

  // Whether it requires publishing last sequence or not
  enum PublishLastSeq : bool { kDontPublishLastSeq, kDoPublishLastSeq };

  // Join the write_thread to write the batch only to the WAL. It is the
  // responsibility of the caller to also write the write batch to the memtable
  // if it required.
  //
  // sub_batch_cnt is expected to be non-zero when assign_order = kDoAssignOrder
  // indicating the number of sub-batches in my_batch. A sub-patch is a subset
  // of the write batch that does not have duplicate keys. When seq_per_batch is
  // not set, each key is a separate sub_batch. Otherwise each duplicate key
  // marks start of a new sub-batch.
  Status WriteImplWALOnly(
      WriteThread* write_thread, const WriteOptions& options,
      WriteBatch* updates, WriteCallback* callback, uint64_t* log_used,
      const uint64_t log_ref, uint64_t* seq_used, const size_t sub_batch_cnt,
      PreReleaseCallback* pre_release_callback, const AssignOrder assign_order,
      const PublishLastSeq publish_last_seq, const bool disable_memtable);

  // write cached_recoverable_state_ to memtable if it is not empty
  // The writer must be the leader in write_thread_ and holding mutex_
  Status WriteRecoverableState();

  // Actual implementation of Close()
  Status CloseImpl();

  // Recover the descriptor from persistent storage. May do a significant
  // amount of work to recover recently logged updates. Any changes to
  // be made to the descriptor are added to *edit.
  // recovered_seq is set to less than kMaxSequenceNumber if the log's tail is
  // skipped.
  virtual Status Recover(
      const std::vector<ColumnFamilyDescriptor>& column_families,
      bool read_only = false, bool error_if_log_file_exist = false,
      bool error_if_data_exists_in_logs = false,
      uint64_t* recovered_seq = nullptr);

  virtual bool OwnTablesAndLogs() const { return true; }
 private:
  friend class DB;
  friend class ErrorHandler;
  friend class InternalStats;
  friend class PessimisticTransaction;
  friend class TransactionBaseImpl;
  friend class WriteCommittedTxn;
  friend class WritePreparedTxn;
  friend class WritePreparedTxnDB;
  friend class WriteBatchWithIndex;
  friend class WriteUnpreparedTxnDB;
  friend class WriteUnpreparedTxn;
#ifndef ROCKSDB_LITE
  friend class ForwardIterator;
#endif
  friend struct SuperVersion;
  friend class CompactedDBImpl;
  friend class DBTest_ConcurrentFlushWAL_Test;
  friend class DBTest_MixedSlowdownOptionsStop_Test;
  friend class DBCompactionTest_CompactBottomLevelFilesWithDeletions_Test;
  friend class DBCompactionTest_CompactionDuringShutdown_Test;
  friend class StatsHistoryTest_PersistentStatsCreateColumnFamilies_Test;
#ifndef NDEBUG
  friend class DBTest2_ReadCallbackTest_Test;
  friend class WriteCallbackTest_WriteWithCallbackTest_Test;
  friend class XFTransactionWriteHandler;
  friend class DBBlobIndexTest;
  friend class WriteUnpreparedTransactionTest_RecoveryTest_Test;
#endif

  struct CompactionState;
  struct PrepickedCompaction;
  struct PurgeFileInfo;

  // Scratch state carried through a write; cleans up the superversion context
  // and frees any memtables collected during the write on destruction.
  struct WriteContext {
    SuperVersionContext superversion_context;
    autovector<MemTable*> memtables_to_free_;

    explicit WriteContext(bool create_superversion = false)
        : superversion_context(create_superversion) {}

    ~WriteContext() {
      superversion_context.Clean();
      for (auto& m : memtables_to_free_) {
        delete m;
      }
    }
  };
  // Tracks the number and accumulated size of one alive WAL file.
  struct LogFileNumberSize {
    explicit LogFileNumberSize(uint64_t _number) : number(_number) {}
    void AddSize(uint64_t new_size) { size += new_size; }
    uint64_t number;
    uint64_t size = 0;
    bool getting_flushed = false;
  };

  struct LogWriterNumber {
    // pass ownership of _writer
    LogWriterNumber(uint64_t _number, log::Writer* _writer)
        : number(_number), writer(_writer) {}

    log::Writer* ReleaseWriter() {
      auto* w = writer;
      writer = nullptr;
      return w;
    }

    // Flush the writer's buffer, then destroy it. Returns the flush status.
    Status ClearWriter() {
      Status s = writer->WriteBuffer();
      delete writer;
      writer = nullptr;
      return s;
    }

    uint64_t number;
    // Visual Studio doesn't support deque's member to be noncopyable because
    // of a std::unique_ptr as a member.
    log::Writer* writer;  // own
    // true for some prefix of logs_
    bool getting_synced = false;
  };
  1023. // PurgeFileInfo is a structure to hold information of files to be deleted in
  1024. // purge_files_
  1025. struct PurgeFileInfo {
  1026. std::string fname;
  1027. std::string dir_to_sync;
  1028. FileType type;
  1029. uint64_t number;
  1030. int job_id;
  1031. PurgeFileInfo(std::string fn, std::string d, FileType t, uint64_t num,
  1032. int jid)
  1033. : fname(fn), dir_to_sync(d), type(t), number(num), job_id(jid) {}
  1034. };
  // Argument required by background flush thread.
  struct BGFlushArg {
    BGFlushArg()
        : cfd_(nullptr), max_memtable_id_(0), superversion_context_(nullptr) {}
    BGFlushArg(ColumnFamilyData* cfd, uint64_t max_memtable_id,
               SuperVersionContext* superversion_context)
        : cfd_(cfd),
          max_memtable_id_(max_memtable_id),
          superversion_context_(superversion_context) {}

    // Column family to flush.
    ColumnFamilyData* cfd_;
    // Maximum ID of memtable to flush. In this column family, memtables with
    // IDs smaller than this value must be flushed before this flush completes.
    uint64_t max_memtable_id_;
    // Pointer to a SuperVersionContext object. After flush completes, RocksDB
    // installs a new superversion for the column family. This operation
    // requires a SuperVersionContext object (currently embedded in JobContext).
    SuperVersionContext* superversion_context_;
  };

  // Argument passed to flush thread.
  struct FlushThreadArg {
    DBImpl* db_;
    Env::Priority thread_pri_;
  };

  // Information for a manual compaction
  struct ManualCompactionState {
    ColumnFamilyData* cfd;
    int input_level;
    int output_level;
    uint32_t output_path_id;
    Status status;
    bool done;
    bool in_progress;             // compaction request being processed?
    bool incomplete;              // only part of requested range compacted
    bool exclusive;               // current behavior of only one manual
    bool disallow_trivial_move;   // Force actual compaction to run
    const InternalKey* begin;     // nullptr means beginning of key range
    const InternalKey* end;       // nullptr means end of key range
    InternalKey* manual_end;      // how far we are compacting
    InternalKey tmp_storage;      // Used to keep track of compaction progress
    InternalKey tmp_storage1;     // Used to keep track of compaction progress
  };

  struct PrepickedCompaction {
    // background compaction takes ownership of `compaction`.
    Compaction* compaction;
    // caller retains ownership of `manual_compaction_state` as it is reused
    // across background compactions.
    ManualCompactionState* manual_compaction_state;  // nullptr if non-manual
    // task limiter token is requested during compaction picking.
    std::unique_ptr<TaskLimiterToken> task_token;
  };

  struct CompactionArg {
    // caller retains ownership of `db`.
    DBImpl* db;
    // background compaction takes ownership of `prepicked_compaction`.
    PrepickedCompaction* prepicked_compaction;
  };
  // Initialize the built-in column family for persistent stats. Depending on
  // whether on-disk persistent stats have been enabled before, it may either
  // create a new column family and column family handle or just a column family
  // handle.
  // Required: DB mutex held
  Status InitPersistStatsColumnFamily();

  // Persistent Stats column family has two format version key which are used
  // for compatibility check. Write format version if it's created for the
  // first time, read format version and check compatibility if recovering
  // from disk. This function requires DB mutex held at entrance but may
  // release and re-acquire DB mutex in the process.
  // Required: DB mutex held
  Status PersistentStatsProcessFormatVersion();

  Status ResumeImpl();

  void MaybeIgnoreError(Status* s) const;

  const Status CreateArchivalDirectory();

  Status CreateColumnFamilyImpl(const ColumnFamilyOptions& cf_options,
                                const std::string& cf_name,
                                ColumnFamilyHandle** handle);

  Status DropColumnFamilyImpl(ColumnFamilyHandle* column_family);

  // Delete any unneeded files and stale in-memory entries.
  void DeleteObsoleteFiles();
  // Delete obsolete files and log status and information of file deletion
  void DeleteObsoleteFileImpl(int job_id, const std::string& fname,
                              const std::string& path_to_sync, FileType type,
                              uint64_t number);

  // Background process needs to call
  //     auto x = CaptureCurrentFileNumberInPendingOutputs()
  //     auto file_num = versions_->NewFileNumber();
  //     <do something>
  //     ReleaseFileNumberFromPendingOutputs(x)
  // This will protect any file with number `file_num` or greater from being
  // deleted while <do something> is running.
  // -----------
  // This function will capture current file number and append it to
  // pending_outputs_. This will prevent any background process to delete any
  // file created after this point.
  std::list<uint64_t>::iterator CaptureCurrentFileNumberInPendingOutputs();

  // This function should be called with the result of
  // CaptureCurrentFileNumberInPendingOutputs(). It then marks that any file
  // created between the calls CaptureCurrentFileNumberInPendingOutputs() and
  // ReleaseFileNumberFromPendingOutputs() can now be deleted (if it's not live
  // and blocked by any other pending_outputs_ calls)
  void ReleaseFileNumberFromPendingOutputs(
      std::unique_ptr<std::list<uint64_t>::iterator>& v);

  Status SyncClosedLogs(JobContext* job_context);

  // Flush the in-memory write buffer to storage. Switches to a new
  // log-file/memtable and writes a new descriptor iff successful. Then
  // installs a new super version for the column family.
  Status FlushMemTableToOutputFile(
      ColumnFamilyData* cfd, const MutableCFOptions& mutable_cf_options,
      bool* madeProgress, JobContext* job_context,
      SuperVersionContext* superversion_context,
      std::vector<SequenceNumber>& snapshot_seqs,
      SequenceNumber earliest_write_conflict_snapshot,
      SnapshotChecker* snapshot_checker, LogBuffer* log_buffer,
      Env::Priority thread_pri);

  // Flush the memtables of (multiple) column families to multiple files on
  // persistent storage.
  Status FlushMemTablesToOutputFiles(
      const autovector<BGFlushArg>& bg_flush_args, bool* made_progress,
      JobContext* job_context, LogBuffer* log_buffer, Env::Priority thread_pri);

  Status AtomicFlushMemTablesToOutputFiles(
      const autovector<BGFlushArg>& bg_flush_args, bool* made_progress,
      JobContext* job_context, LogBuffer* log_buffer, Env::Priority thread_pri);
  1157. // REQUIRES: log_numbers are sorted in ascending order
  1158. // corrupted_log_found is set to true if we recover from a corrupted log file.
  1159. Status RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
  1160. SequenceNumber* next_sequence, bool read_only,
  1161. bool* corrupted_log_found);
  1162. // The following two methods are used to flush a memtable to
  1163. // storage. The first one is used at database RecoveryTime (when the
  1164. // database is opened) and is heavyweight because it holds the mutex
  1165. // for the entire period. The second method WriteLevel0Table supports
  1166. // concurrent flush memtables to storage.
  1167. Status WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd,
  1168. MemTable* mem, VersionEdit* edit);
  1169. // Restore alive_log_files_ and total_log_size_ after recovery.
  1170. // It needs to run only when there's no flush during recovery
  1171. // (e.g. avoid_flush_during_recovery=true). May also trigger flush
  1172. // in case total_log_size > max_total_wal_size.
  1173. Status RestoreAliveLogFiles(const std::vector<uint64_t>& log_numbers);
  1174. // num_bytes: for slowdown case, delay time is calculated based on
  1175. // `num_bytes` going through.
  1176. Status DelayWrite(uint64_t num_bytes, const WriteOptions& write_options);
  1177. Status ThrottleLowPriWritesIfNeeded(const WriteOptions& write_options,
  1178. WriteBatch* my_batch);
  1179. // REQUIRES: mutex locked and in write thread.
  1180. Status ScheduleFlushes(WriteContext* context);
  1181. void MaybeFlushStatsCF(autovector<ColumnFamilyData*>* cfds);
  1182. Status TrimMemtableHistory(WriteContext* context);
  1183. Status SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context);
  1184. void SelectColumnFamiliesForAtomicFlush(autovector<ColumnFamilyData*>* cfds);
  1185. // Force current memtable contents to be flushed.
  1186. Status FlushMemTable(ColumnFamilyData* cfd, const FlushOptions& options,
  1187. FlushReason flush_reason, bool writes_stopped = false);
  1188. Status AtomicFlushMemTables(
  1189. const autovector<ColumnFamilyData*>& column_family_datas,
  1190. const FlushOptions& options, FlushReason flush_reason,
  1191. bool writes_stopped = false);
  1192. // Wait until flushing this column family won't stall writes
  1193. Status WaitUntilFlushWouldNotStallWrites(ColumnFamilyData* cfd,
  1194. bool* flush_needed);
  1195. // Wait for memtable flushed.
  1196. // If flush_memtable_id is non-null, wait until the memtable with the ID
  1197. // gets flush. Otherwise, wait until the column family don't have any
  1198. // memtable pending flush.
  1199. // resuming_from_bg_err indicates whether the caller is attempting to resume
  1200. // from background error.
  1201. Status WaitForFlushMemTable(ColumnFamilyData* cfd,
  1202. const uint64_t* flush_memtable_id = nullptr,
  1203. bool resuming_from_bg_err = false) {
  1204. return WaitForFlushMemTables({cfd}, {flush_memtable_id},
  1205. resuming_from_bg_err);
  1206. }
  1207. // Wait for memtables to be flushed for multiple column families.
  1208. Status WaitForFlushMemTables(
  1209. const autovector<ColumnFamilyData*>& cfds,
  1210. const autovector<const uint64_t*>& flush_memtable_ids,
  1211. bool resuming_from_bg_err);
// Blocks until all in-flight memtable writes of earlier write groups have
// completed. Called from the write thread (e.g. before switching
// memtable/WAL) so that no writer is still applying to the memtable being
// retired.
// REQUIRES: mutex_ held (may be temporarily released below).
inline void WaitForPendingWrites() {
  mutex_.AssertHeld();
  TEST_SYNC_POINT("DBImpl::WaitForPendingWrites:BeforeBlock");
  // In case of pipelined write is enabled, wait for all pending memtable
  // writers.
  if (immutable_db_options_.enable_pipelined_write) {
    // Memtable writers may call DB::Get in case max_successive_merges > 0,
    // which may lock mutex. Unlocking mutex here to avoid deadlock.
    mutex_.Unlock();
    write_thread_.WaitForMemTableWriters();
    mutex_.Lock();
  }
  if (!immutable_db_options_.unordered_write) {
    // Without unordered_write, memtable writes happen inside the write
    // group, so the writes are finished before the next write group starts.
    return;
  }
  // With unordered_write, a writer can finish its WAL write while its
  // memtable insert is still in flight. Wait for the ones who already wrote
  // to the WAL to finish their memtable write.
  if (pending_memtable_writes_.load() != 0) {
    std::unique_lock<std::mutex> guard(switch_mutex_);
    switch_cv_.wait(guard,
                    [&] { return pending_memtable_writes_.load() == 0; });
  }
}
  1236. // REQUIRES: mutex locked and in write thread.
  1237. void AssignAtomicFlushSeq(const autovector<ColumnFamilyData*>& cfds);
  1238. // REQUIRES: mutex locked and in write thread.
  1239. Status SwitchWAL(WriteContext* write_context);
  1240. // REQUIRES: mutex locked and in write thread.
  1241. Status HandleWriteBufferFull(WriteContext* write_context);
  1242. // REQUIRES: mutex locked
  1243. Status PreprocessWrite(const WriteOptions& write_options, bool* need_log_sync,
  1244. WriteContext* write_context);
  1245. WriteBatch* MergeBatch(const WriteThread::WriteGroup& write_group,
  1246. WriteBatch* tmp_batch, size_t* write_with_wal,
  1247. WriteBatch** to_be_cached_state);
  1248. Status WriteToWAL(const WriteBatch& merged_batch, log::Writer* log_writer,
  1249. uint64_t* log_used, uint64_t* log_size);
  1250. Status WriteToWAL(const WriteThread::WriteGroup& write_group,
  1251. log::Writer* log_writer, uint64_t* log_used,
  1252. bool need_log_sync, bool need_log_dir_sync,
  1253. SequenceNumber sequence);
  1254. Status ConcurrentWriteToWAL(const WriteThread::WriteGroup& write_group,
  1255. uint64_t* log_used, SequenceNumber* last_sequence,
  1256. size_t seq_inc);
  1257. // Used by WriteImpl to update bg_error_ if paranoid check is enabled.
  1258. void WriteStatusCheck(const Status& status);
  1259. // Used by WriteImpl to update bg_error_ in case of memtable insert error.
  1260. void MemTableInsertStatusCheck(const Status& memtable_insert_status);
  1261. #ifndef ROCKSDB_LITE
  1262. Status CompactFilesImpl(const CompactionOptions& compact_options,
  1263. ColumnFamilyData* cfd, Version* version,
  1264. const std::vector<std::string>& input_file_names,
  1265. std::vector<std::string>* const output_file_names,
  1266. const int output_level, int output_path_id,
  1267. JobContext* job_context, LogBuffer* log_buffer,
  1268. CompactionJobInfo* compaction_job_info);
  1269. // Wait for current IngestExternalFile() calls to finish.
  1270. // REQUIRES: mutex_ held
  1271. void WaitForIngestFile();
  1272. #else
  1273. // IngestExternalFile is not supported in ROCKSDB_LITE so this function
  1274. // will be no-op
  1275. void WaitForIngestFile() {}
  1276. #endif // ROCKSDB_LITE
  1277. ColumnFamilyData* GetColumnFamilyDataByName(const std::string& cf_name);
  1278. void MaybeScheduleFlushOrCompaction();
  1279. // A flush request specifies the column families to flush as well as the
  1280. // largest memtable id to persist for each column family. Once all the
  1281. // memtables whose IDs are smaller than or equal to this per-column-family
  1282. // specified value, this flush request is considered to have completed its
  1283. // work of flushing this column family. After completing the work for all
  1284. // column families in this request, this flush is considered complete.
  1285. typedef std::vector<std::pair<ColumnFamilyData*, uint64_t>> FlushRequest;
  1286. void GenerateFlushRequest(const autovector<ColumnFamilyData*>& cfds,
  1287. FlushRequest* req);
  1288. void SchedulePendingFlush(const FlushRequest& req, FlushReason flush_reason);
  1289. void SchedulePendingCompaction(ColumnFamilyData* cfd);
  1290. void SchedulePendingPurge(std::string fname, std::string dir_to_sync,
  1291. FileType type, uint64_t number, int job_id);
  1292. static void BGWorkCompaction(void* arg);
  1293. // Runs a pre-chosen universal compaction involving bottom level in a
  1294. // separate, bottom-pri thread pool.
  1295. static void BGWorkBottomCompaction(void* arg);
  1296. static void BGWorkFlush(void* arg);
  1297. static void BGWorkPurge(void* arg);
  1298. static void UnscheduleCompactionCallback(void* arg);
  1299. static void UnscheduleFlushCallback(void* arg);
  1300. void BackgroundCallCompaction(PrepickedCompaction* prepicked_compaction,
  1301. Env::Priority thread_pri);
  1302. void BackgroundCallFlush(Env::Priority thread_pri);
  1303. void BackgroundCallPurge();
  1304. Status BackgroundCompaction(bool* madeProgress, JobContext* job_context,
  1305. LogBuffer* log_buffer,
  1306. PrepickedCompaction* prepicked_compaction,
  1307. Env::Priority thread_pri);
  1308. Status BackgroundFlush(bool* madeProgress, JobContext* job_context,
  1309. LogBuffer* log_buffer, FlushReason* reason,
  1310. Env::Priority thread_pri);
  1311. bool EnoughRoomForCompaction(ColumnFamilyData* cfd,
  1312. const std::vector<CompactionInputFiles>& inputs,
  1313. bool* sfm_bookkeeping, LogBuffer* log_buffer);
  1314. // Request compaction tasks token from compaction thread limiter.
// It always succeeds if force = true or the limiter is disabled.
  1316. bool RequestCompactionToken(ColumnFamilyData* cfd, bool force,
  1317. std::unique_ptr<TaskLimiterToken>* token,
  1318. LogBuffer* log_buffer);
  1319. // Schedule background tasks
  1320. void StartTimedTasks();
  1321. void PrintStatistics();
  1322. size_t EstimateInMemoryStatsHistorySize() const;
  1323. // persist stats to column family "_persistent_stats"
  1324. void PersistStats();
  1325. // dump rocksdb.stats to LOG
  1326. void DumpStats();
  1327. // Return the minimum empty level that could hold the total data in the
  1328. // input level. Return the input level, if such level could not be found.
  1329. int FindMinimumEmptyLevelFitting(ColumnFamilyData* cfd,
  1330. const MutableCFOptions& mutable_cf_options,
  1331. int level);
  1332. // Move the files in the input level to the target level.
  1333. // If target_level < 0, automatically calculate the minimum level that could
  1334. // hold the data set.
  1335. Status ReFitLevel(ColumnFamilyData* cfd, int level, int target_level = -1);
  1336. // helper functions for adding and removing from flush & compaction queues
  1337. void AddToCompactionQueue(ColumnFamilyData* cfd);
  1338. ColumnFamilyData* PopFirstFromCompactionQueue();
  1339. FlushRequest PopFirstFromFlushQueue();
  1340. // Pick the first unthrottled compaction with task token from queue.
  1341. ColumnFamilyData* PickCompactionFromQueue(
  1342. std::unique_ptr<TaskLimiterToken>* token, LogBuffer* log_buffer);
  1343. // helper function to call after some of the logs_ were synced
  1344. void MarkLogsSynced(uint64_t up_to, bool synced_dir, const Status& status);
  1345. SnapshotImpl* GetSnapshotImpl(bool is_write_conflict_boundary,
  1346. bool lock = true);
  1347. uint64_t GetMaxTotalWalSize() const;
  1348. Directory* GetDataDir(ColumnFamilyData* cfd, size_t path_id) const;
  1349. Status CloseHelper();
  1350. void WaitForBackgroundWork();
  1351. // Background threads call this function, which is just a wrapper around
  1352. // the InstallSuperVersion() function. Background threads carry
  1353. // sv_context which can have new_superversion already
  1354. // allocated.
  1355. // All ColumnFamily state changes go through this function. Here we analyze
  1356. // the new state and we schedule background work if we detect that the new
  1357. // state needs flush or compaction.
  1358. void InstallSuperVersionAndScheduleWork(
  1359. ColumnFamilyData* cfd, SuperVersionContext* sv_context,
  1360. const MutableCFOptions& mutable_cf_options);
  1361. bool GetIntPropertyInternal(ColumnFamilyData* cfd,
  1362. const DBPropertyInfo& property_info,
  1363. bool is_locked, uint64_t* value);
  1364. bool GetPropertyHandleOptionsStatistics(std::string* value);
  1365. bool HasPendingManualCompaction();
  1366. bool HasExclusiveManualCompaction();
  1367. void AddManualCompaction(ManualCompactionState* m);
  1368. void RemoveManualCompaction(ManualCompactionState* m);
  1369. bool ShouldntRunManualCompaction(ManualCompactionState* m);
  1370. bool HaveManualCompaction(ColumnFamilyData* cfd);
  1371. bool MCOverlap(ManualCompactionState* m, ManualCompactionState* m1);
  1372. #ifndef ROCKSDB_LITE
  1373. void BuildCompactionJobInfo(const ColumnFamilyData* cfd, Compaction* c,
  1374. const Status& st,
  1375. const CompactionJobStats& compaction_job_stats,
  1376. const int job_id, const Version* current,
  1377. CompactionJobInfo* compaction_job_info) const;
  1378. // Reserve the next 'num' file numbers for to-be-ingested external SST files,
  1379. // and return the current file_number in 'next_file_number'.
  1380. // Write a version edit to the MANIFEST.
  1381. Status ReserveFileNumbersBeforeIngestion(
  1382. ColumnFamilyData* cfd, uint64_t num,
  1383. std::unique_ptr<std::list<uint64_t>::iterator>& pending_output_elem,
  1384. uint64_t* next_file_number);
  1385. #endif //! ROCKSDB_LITE
  1386. bool ShouldPurge(uint64_t file_number) const;
  1387. void MarkAsGrabbedForPurge(uint64_t file_number);
  1388. size_t GetWalPreallocateBlockSize(uint64_t write_buffer_size) const;
  1389. Env::WriteLifeTimeHint CalculateWALWriteHint() { return Env::WLTH_SHORT; }
  1390. Status CreateWAL(uint64_t log_file_num, uint64_t recycle_log_number,
  1391. size_t preallocate_block_size, log::Writer** new_log);
  1392. // Validate self-consistency of DB options
  1393. static Status ValidateOptions(const DBOptions& db_options);
  1394. // Validate self-consistency of DB options and its consistency with cf options
  1395. static Status ValidateOptions(
  1396. const DBOptions& db_options,
  1397. const std::vector<ColumnFamilyDescriptor>& column_families);
  1398. // Utility function to do some debug validation and sort the given vector
  1399. // of MultiGet keys
  1400. void PrepareMultiGetKeys(
  1401. const size_t num_keys, bool sorted,
  1402. autovector<KeyContext*, MultiGetContext::MAX_BATCH_SIZE>* key_ptrs);
  1403. // A structure to hold the information required to process MultiGet of keys
  1404. // belonging to one column family. For a multi column family MultiGet, there
  1405. // will be a container of these objects.
  1406. struct MultiGetColumnFamilyData {
  1407. ColumnFamilyHandle* cf;
  1408. ColumnFamilyData* cfd;
  1409. // For the batched MultiGet which relies on sorted keys, start specifies
  1410. // the index of first key belonging to this column family in the sorted
  1411. // list.
  1412. size_t start;
  1413. // For the batched MultiGet case, num_keys specifies the number of keys
  1414. // belonging to this column family in the sorted list
  1415. size_t num_keys;
  1416. // SuperVersion for the column family obtained in a manner that ensures a
  1417. // consistent view across all column families in the DB
  1418. SuperVersion* super_version;
  1419. MultiGetColumnFamilyData(ColumnFamilyHandle* column_family,
  1420. SuperVersion* sv)
  1421. : cf(column_family),
  1422. cfd(static_cast<ColumnFamilyHandleImpl*>(cf)->cfd()),
  1423. start(0),
  1424. num_keys(0),
  1425. super_version(sv) {}
  1426. MultiGetColumnFamilyData(ColumnFamilyHandle* column_family, size_t first,
  1427. size_t count, SuperVersion* sv)
  1428. : cf(column_family),
  1429. cfd(static_cast<ColumnFamilyHandleImpl*>(cf)->cfd()),
  1430. start(first),
  1431. num_keys(count),
  1432. super_version(sv) {}
  1433. MultiGetColumnFamilyData() = default;
  1434. };
  1435. // A common function to obtain a consistent snapshot, which can be implicit
  1436. // if the user doesn't specify a snapshot in read_options, across
  1437. // multiple column families for MultiGet. It will attempt to get an implicit
// snapshot without acquiring the db mutex, but will give up after a few
  1439. // tries and acquire the mutex if a memtable flush happens. The template
  1440. // allows both the batched and non-batched MultiGet to call this with
  1441. // either an std::unordered_map or autovector of column families.
  1442. //
  1443. // If callback is non-null, the callback is refreshed with the snapshot
  1444. // sequence number
  1445. //
  1446. // A return value of true indicates that the SuperVersions were obtained
  1447. // from the ColumnFamilyData, whereas false indicates they are thread
  1448. // local
  1449. template <class T>
  1450. bool MultiCFSnapshot(
  1451. const ReadOptions& read_options, ReadCallback* callback,
  1452. std::function<MultiGetColumnFamilyData*(typename T::iterator&)>&
  1453. iter_deref_func,
  1454. T* cf_list, SequenceNumber* snapshot);
  1455. // The actual implementation of the batching MultiGet. The caller is expected
  1456. // to have acquired the SuperVersion and pass in a snapshot sequence number
  1457. // in order to construct the LookupKeys. The start_key and num_keys specify
  1458. // the range of keys in the sorted_keys vector for a single column family.
  1459. void MultiGetImpl(
  1460. const ReadOptions& read_options, size_t start_key, size_t num_keys,
  1461. autovector<KeyContext*, MultiGetContext::MAX_BATCH_SIZE>* sorted_keys,
  1462. SuperVersion* sv, SequenceNumber snap_seqnum, ReadCallback* callback,
  1463. bool* is_blob_index);
  1464. // table_cache_ provides its own synchronization
  1465. std::shared_ptr<Cache> table_cache_;
  1466. // Lock over the persistent DB state. Non-nullptr iff successfully acquired.
  1467. FileLock* db_lock_;
// In addition to mutex_, stats_history_mutex_ protects writes to
// stats_history_.
  1469. InstrumentedMutex stats_history_mutex_;
// In addition to mutex_, log_write_mutex_ protects writes to logs_ and
  1471. // logfile_number_. With two_write_queues it also protects alive_log_files_,
  1472. // and log_empty_. Refer to the definition of each variable below for more
  1473. // details.
// Note: to avoid deadlock, if both log_write_mutex_ and mutex_ need to be
// acquired, the order should be first mutex_ and then log_write_mutex_.
  1476. InstrumentedMutex log_write_mutex_;
  1477. std::atomic<bool> shutting_down_;
  1478. std::atomic<bool> manual_compaction_paused_;
  1479. // This condition variable is signaled on these conditions:
  1480. // * whenever bg_compaction_scheduled_ goes down to 0
  1481. // * if AnyManualCompaction, whenever a compaction finishes, even if it hasn't
  1482. // made any progress
  1483. // * whenever a compaction made any progress
  1484. // * whenever bg_flush_scheduled_ or bg_purge_scheduled_ value decreases
  1485. // (i.e. whenever a flush is done, even if it didn't make any progress)
  1486. // * whenever there is an error in background purge, flush or compaction
  1487. // * whenever num_running_ingest_file_ goes to 0.
  1488. // * whenever pending_purge_obsolete_files_ goes to 0.
  1489. // * whenever disable_delete_obsolete_files_ goes to 0.
  1490. // * whenever SetOptions successfully updates options.
  1491. // * whenever a column family is dropped.
  1492. InstrumentedCondVar bg_cv_;
  1493. // Writes are protected by locking both mutex_ and log_write_mutex_, and reads
  1494. // must be under either mutex_ or log_write_mutex_. Since after ::Open,
  1495. // logfile_number_ is currently updated only in write_thread_, it can be read
  1496. // from the same write_thread_ without any locks.
  1497. uint64_t logfile_number_;
  1498. std::deque<uint64_t>
  1499. log_recycle_files_; // a list of log files that we can recycle
  1500. bool log_dir_synced_;
  1501. // Without two_write_queues, read and writes to log_empty_ are protected by
  1502. // mutex_. Since it is currently updated/read only in write_thread_, it can be
  1503. // accessed from the same write_thread_ without any locks. With
  1504. // two_write_queues writes, where it can be updated in different threads,
// reads and writes are protected by log_write_mutex_ instead. This is to
// avoid an expensive mutex_ lock during the WAL write, which updates
// log_empty_.
  1507. bool log_empty_;
  1508. ColumnFamilyHandleImpl* persist_stats_cf_handle_;
  1509. bool persistent_stats_cfd_exists_ = true;
  1510. // Without two_write_queues, read and writes to alive_log_files_ are
  1511. // protected by mutex_. However since back() is never popped, and push_back()
// is done only from write_thread_, the same thread can access the item
// referred to by back() without mutex_. With two_write_queues_, writes
  1514. // are protected by locking both mutex_ and log_write_mutex_, and reads must
  1515. // be under either mutex_ or log_write_mutex_.
  1516. std::deque<LogFileNumberSize> alive_log_files_;
  1517. // Log files that aren't fully synced, and the current log file.
  1518. // Synchronization:
  1519. // - push_back() is done from write_thread_ with locked mutex_ and
  1520. // log_write_mutex_
  1521. // - pop_front() is done from any thread with locked mutex_ and
  1522. // log_write_mutex_
  1523. // - reads are done with either locked mutex_ or log_write_mutex_
  1524. // - back() and items with getting_synced=true are not popped,
  1525. // - The same thread that sets getting_synced=true will reset it.
  1526. // - it follows that the object referred by back() can be safely read from
  1527. // the write_thread_ without using mutex
  1528. // - it follows that the items with getting_synced=true can be safely read
  1529. // from the same thread that has set getting_synced=true
  1530. std::deque<LogWriterNumber> logs_;
  1531. // Signaled when getting_synced becomes false for some of the logs_.
  1532. InstrumentedCondVar log_sync_cv_;
  1533. // This is the app-level state that is written to the WAL but will be used
  1534. // only during recovery. Using this feature enables not writing the state to
  1535. // memtable on normal writes and hence improving the throughput. Each new
  1536. // write of the state will replace the previous state entirely even if the
// keys in the two consecutive states do not overlap.
// It is protected by log_write_mutex_ when two_write_queues_ is enabled.
// Otherwise only the head of write_thread_ can access it.
  1540. WriteBatch cached_recoverable_state_;
  1541. std::atomic<bool> cached_recoverable_state_empty_ = {true};
  1542. std::atomic<uint64_t> total_log_size_;
  1543. // If this is non-empty, we need to delete these log files in background
  1544. // threads. Protected by db mutex.
  1545. autovector<log::Writer*> logs_to_free_;
  1546. bool is_snapshot_supported_;
  1547. std::map<uint64_t, std::map<std::string, uint64_t>> stats_history_;
  1548. std::map<std::string, uint64_t> stats_slice_;
  1549. bool stats_slice_initialized_ = false;
  1550. Directories directories_;
  1551. WriteBufferManager* write_buffer_manager_;
  1552. WriteThread write_thread_;
  1553. WriteBatch tmp_batch_;
  1554. // The write thread when the writers have no memtable write. This will be used
  1555. // in 2PC to batch the prepares separately from the serial commit.
  1556. WriteThread nonmem_write_thread_;
  1557. WriteController write_controller_;
  1558. // Size of the last batch group. In slowdown mode, next write needs to
  1559. // sleep if it uses up the quota.
// Note: This is to protect the memtable and compaction. If the batch only
// writes to the WAL, its size need not be included in this.
  1562. uint64_t last_batch_group_size_;
  1563. FlushScheduler flush_scheduler_;
  1564. TrimHistoryScheduler trim_history_scheduler_;
  1565. SnapshotList snapshots_;
  1566. // For each background job, pending_outputs_ keeps the current file number at
  1567. // the time that background job started.
  1568. // FindObsoleteFiles()/PurgeObsoleteFiles() never deletes any file that has
  1569. // number bigger than any of the file number in pending_outputs_. Since file
  1570. // numbers grow monotonically, this also means that pending_outputs_ is always
  1571. // sorted. After a background job is done executing, its file number is
  1572. // deleted from pending_outputs_, which allows PurgeObsoleteFiles() to clean
  1573. // it up.
  1574. // State is protected with db mutex.
  1575. std::list<uint64_t> pending_outputs_;
  1576. // flush_queue_ and compaction_queue_ hold column families that we need to
  1577. // flush and compact, respectively.
  1578. // A column family is inserted into flush_queue_ when it satisfies condition
  1579. // cfd->imm()->IsFlushPending()
  1580. // A column family is inserted into compaction_queue_ when it satisfied
  1581. // condition cfd->NeedsCompaction()
  1582. // Column families in this list are all Ref()-erenced
  1583. // TODO(icanadi) Provide some kind of ReferencedColumnFamily class that will
  1584. // do RAII on ColumnFamilyData
  1585. // Column families are in this queue when they need to be flushed or
  1586. // compacted. Consumers of these queues are flush and compaction threads. When
  1587. // column family is put on this queue, we increase unscheduled_flushes_ and
  1588. // unscheduled_compactions_. When these variables are bigger than zero, that
  1589. // means we need to schedule background threads for flush and compaction.
  1590. // Once the background threads are scheduled, we decrease unscheduled_flushes_
  1591. // and unscheduled_compactions_. That way we keep track of number of
  1592. // compaction and flush threads we need to schedule. This scheduling is done
  1593. // in MaybeScheduleFlushOrCompaction()
  1594. // invariant(column family present in flush_queue_ <==>
  1595. // ColumnFamilyData::pending_flush_ == true)
  1596. std::deque<FlushRequest> flush_queue_;
  1597. // invariant(column family present in compaction_queue_ <==>
  1598. // ColumnFamilyData::pending_compaction_ == true)
  1599. std::deque<ColumnFamilyData*> compaction_queue_;
  1600. // A map to store file numbers and filenames of the files to be purged
  1601. std::unordered_map<uint64_t, PurgeFileInfo> purge_files_;
  1602. // A vector to store the file numbers that have been assigned to certain
  1603. // JobContext. Current implementation tracks ssts only.
  1604. std::unordered_set<uint64_t> files_grabbed_for_purge_;
  1605. // A queue to store log writers to close
  1606. std::deque<log::Writer*> logs_to_free_queue_;
  1607. std::deque<SuperVersion*> superversions_to_free_queue_;
  1608. int unscheduled_flushes_;
  1609. int unscheduled_compactions_;
  1610. // count how many background compactions are running or have been scheduled in
  1611. // the BOTTOM pool
  1612. int bg_bottom_compaction_scheduled_;
  1613. // count how many background compactions are running or have been scheduled
  1614. int bg_compaction_scheduled_;
  1615. // stores the number of compactions are currently running
  1616. int num_running_compactions_;
  1617. // number of background memtable flush jobs, submitted to the HIGH pool
  1618. int bg_flush_scheduled_;
  1619. // stores the number of flushes are currently running
  1620. int num_running_flushes_;
  1621. // number of background obsolete file purge jobs, submitted to the HIGH pool
  1622. int bg_purge_scheduled_;
  1623. std::deque<ManualCompactionState*> manual_compaction_dequeue_;
  1624. // shall we disable deletion of obsolete files
  1625. // if 0 the deletion is enabled.
  1626. // if non-zero, files will not be getting deleted
  1627. // This enables two different threads to call
  1628. // EnableFileDeletions() and DisableFileDeletions()
  1629. // without any synchronization
  1630. int disable_delete_obsolete_files_;
  1631. // Number of times FindObsoleteFiles has found deletable files and the
  1632. // corresponding call to PurgeObsoleteFiles has not yet finished.
  1633. int pending_purge_obsolete_files_;
  1634. // last time when DeleteObsoleteFiles with full scan was executed. Originally
  1635. // initialized with startup time.
  1636. uint64_t delete_obsolete_files_last_run_;
  1637. // last time stats were dumped to LOG
  1638. std::atomic<uint64_t> last_stats_dump_time_microsec_;
  1639. // The thread that wants to switch memtable, can wait on this cv until the
  1640. // pending writes to memtable finishes.
  1641. std::condition_variable switch_cv_;
  1642. // The mutex used by switch_cv_. mutex_ should be acquired beforehand.
  1643. std::mutex switch_mutex_;
  1644. // Number of threads intending to write to memtable
  1645. std::atomic<size_t> pending_memtable_writes_ = {};
  1646. // Each flush or compaction gets its own job id. this counter makes sure
  1647. // they're unique
  1648. std::atomic<int> next_job_id_;
  1649. // A flag indicating whether the current rocksdb database has any
  1650. // data that is not yet persisted into either WAL or SST file.
  1651. // Used when disableWAL is true.
  1652. std::atomic<bool> has_unpersisted_data_;
  1653. // if an attempt was made to flush all column families that
  1654. // the oldest log depends on but uncommitted data in the oldest
  1655. // log prevents the log from being released.
  1656. // We must attempt to free the dependent memtables again
  1657. // at a later time after the transaction in the oldest
// log is fully committed.
  1659. bool unable_to_release_oldest_log_;
  1660. static const int KEEP_LOG_FILE_NUM = 1000;
  1661. // MSVC version 1800 still does not have constexpr for ::max()
  1662. static const uint64_t kNoTimeOut = port::kMaxUint64;
  1663. std::string db_absolute_path_;
  1664. // Number of running IngestExternalFile() or CreateColumnFamilyWithImport()
  1665. // calls.
  1666. // REQUIRES: mutex held
  1667. int num_running_ingest_file_;
  1668. #ifndef ROCKSDB_LITE
  1669. WalManager wal_manager_;
  1670. #endif // ROCKSDB_LITE
  1671. // Unified interface for logging events
  1672. EventLogger event_logger_;
  1673. // A value of > 0 temporarily disables scheduling of background work
  1674. int bg_work_paused_;
  1675. // A value of > 0 temporarily disables scheduling of background compaction
  1676. int bg_compaction_paused_;
  1677. // Guard against multiple concurrent refitting
  1678. bool refitting_level_;
  1679. // Indicate DB was opened successfully
  1680. bool opened_successfully_;
// The min threshold to trigger bottommost compaction for removing
// garbage, among all column families.
  1683. SequenceNumber bottommost_files_mark_threshold_ = kMaxSequenceNumber;
  1684. LogsWithPrepTracker logs_with_prep_tracker_;
  1685. // Callback for compaction to check if a key is visible to a snapshot.
  1686. // REQUIRES: mutex held
  1687. std::unique_ptr<SnapshotChecker> snapshot_checker_;
  1688. // Callback for when the cached_recoverable_state_ is written to memtable
  1689. // Only to be set during initialization
  1690. std::unique_ptr<PreReleaseCallback> recoverable_state_pre_release_callback_;
  1691. // handle for scheduling stats dumping at fixed intervals
  1692. // REQUIRES: mutex locked
  1693. std::unique_ptr<ROCKSDB_NAMESPACE::RepeatableThread> thread_dump_stats_;
  1694. // handle for scheduling stats snapshoting at fixed intervals
  1695. // REQUIRES: mutex locked
  1696. std::unique_ptr<ROCKSDB_NAMESPACE::RepeatableThread> thread_persist_stats_;
  1697. // When set, we use a separate queue for writes that dont write to memtable.
  1698. // In 2PC these are the writes at Prepare phase.
  1699. const bool two_write_queues_;
  1700. const bool manual_wal_flush_;
  1701. // LastSequence also indicates last published sequence visibile to the
  1702. // readers. Otherwise LastPublishedSequence should be used.
  1703. const bool last_seq_same_as_publish_seq_;
  1704. // It indicates that a customized gc algorithm must be used for
  1705. // flush/compaction and if it is not provided vis SnapshotChecker, we should
  1706. // disable gc to be safe.
  1707. const bool use_custom_gc_;
  1708. // Flag to indicate that the DB instance shutdown has been initiated. This
  1709. // different from shutting_down_ atomic in that it is set at the beginning
  1710. // of shutdown sequence, specifically in order to prevent any background
  1711. // error recovery from going on in parallel. The latter, shutting_down_,
  1712. // is set a little later during the shutdown after scheduling memtable
  1713. // flushes
  1714. std::atomic<bool> shutdown_initiated_;
  1715. // Flag to indicate whether sst_file_manager object was allocated in
  1716. // DB::Open() or passed to us
  1717. bool own_sfm_;
  1718. // Clients must periodically call SetPreserveDeletesSequenceNumber()
  1719. // to advance this seqnum. Default value is 0 which means ALL deletes are
  1720. // preserved. Note that this has no effect if DBOptions.preserve_deletes
  1721. // is set to false.
  1722. std::atomic<SequenceNumber> preserve_deletes_seqnum_;
  1723. const bool preserve_deletes_;
  1724. // Flag to check whether Close() has been called on this DB
  1725. bool closed_;
  1726. ErrorHandler error_handler_;
  1727. // Conditional variable to coordinate installation of atomic flush results.
  1728. // With atomic flush, each bg thread installs the result of flushing multiple
  1729. // column families, and different threads can flush different column
  1730. // families. It's difficult to rely on one thread to perform batch
  1731. // installation for all threads. This is different from the non-atomic flush
  1732. // case.
  1733. // atomic_flush_install_cv_ makes sure that threads install atomic flush
  1734. // results sequentially. Flush results of memtables with lower IDs get
  1735. // installed to MANIFEST first.
  1736. InstrumentedCondVar atomic_flush_install_cv_;
  1737. bool wal_in_db_path_;
  1738. };
// Return a sanitized copy of the user-supplied options for the database at
// path `db` (defined elsewhere; presumably clamps/repairs unreasonable
// values -- see the implementation for specifics).
extern Options SanitizeOptions(const std::string& db, const Options& src);
extern DBOptions SanitizeOptions(const std::string& db, const DBOptions& src);
// Return the compression type to use for the output of a flush, derived from
// the column family's immutable and mutable options.
extern CompressionType GetCompressionFlush(
    const ImmutableCFOptions& ioptions,
    const MutableCFOptions& mutable_cf_options);
// Return the earliest log file number to keep after the memtable flush is
// finalized.
// `cfd_to_flush` is the column family whose memtables (specified in
// `memtables_to_flush`) will be flushed and thus will not depend on any WAL
// file.
// `edit_list` are the version edits produced by the flush, applied when
// computing the new minimum (passed by value; callers hand in a copy).
// The function is only applicable to 2pc mode.
extern uint64_t PrecomputeMinLogNumberToKeep(
    VersionSet* vset, const ColumnFamilyData& cfd_to_flush,
    autovector<VersionEdit*> edit_list,
    const autovector<MemTable*>& memtables_to_flush,
    LogsWithPrepTracker* prep_tracker);
// Return the minimum prepare-section log file number still referenced by any
// memtable that will remain after the flush.
// `cfd_to_flush` is the column family whose memtable will be flushed and thus
// will not depend on any WAL file. nullptr means no memtable is being flushed.
// The function is only applicable to 2pc mode.
extern uint64_t FindMinPrepLogReferencedByMemTable(
    VersionSet* vset, const ColumnFamilyData* cfd_to_flush,
    const autovector<MemTable*>& memtables_to_flush);
  1761. // Fix user-supplied options to be reasonable
  1762. template <class T, class V>
  1763. static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
  1764. if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
  1765. if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
  1766. }
  1767. } // namespace ROCKSDB_NAMESPACE