// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
#pragma once

#include <deque>
#include <limits>
#include <list>
#include <set>
#include <string>
#include <vector>

#include "db/logs_with_prep_tracker.h"
#include "db/memtable.h"
#include "db/range_del_aggregator.h"
#include "file/filename.h"
#include "logging/log_buffer.h"
#include "monitoring/instrumented_mutex.h"
#include "rocksdb/db.h"
#include "rocksdb/iterator.h"
#include "rocksdb/options.h"
#include "rocksdb/types.h"
#include "util/autovector.h"

namespace ROCKSDB_NAMESPACE {

class ColumnFamilyData;
class InternalKeyComparator;
class InstrumentedMutex;
class MergeIteratorBuilder;
class MemTableList;

struct FlushJobInfo;

// Keeps a list of immutable memtables (ReadOnlyMemTable*) in a vector.
// The list is immutable if the refcount is bigger than one. It is used as
// a state for Get() and iterator code paths.
//
// This class is not thread-safe. External synchronization is required
// (such as holding the db mutex or being on the write thread).
class MemTableListVersion {
 public:
  explicit MemTableListVersion(size_t* parent_memtable_list_memory_usage,
                               const MemTableListVersion& old);
  explicit MemTableListVersion(size_t* parent_memtable_list_memory_usage,
                               int64_t max_write_buffer_size_to_maintain);

  void Ref();
  void Unref(autovector<ReadOnlyMemTable*>* to_delete = nullptr);

  // Search all the memtables starting from the most recent one.
  // Return the most recent value found, if any.
  //
  // If any operation was found for this key, its most recent sequence number
  // will be stored in *seq on success (regardless of whether true/false is
  // returned). Otherwise, *seq will be set to kMaxSequenceNumber.
  bool Get(const LookupKey& key, std::string* value,
           PinnableWideColumns* columns, std::string* timestamp, Status* s,
           MergeContext* merge_context,
           SequenceNumber* max_covering_tombstone_seq, SequenceNumber* seq,
           const ReadOptions& read_opts, ReadCallback* callback = nullptr,
           bool* is_blob_index = nullptr);

  bool Get(const LookupKey& key, std::string* value,
           PinnableWideColumns* columns, std::string* timestamp, Status* s,
           MergeContext* merge_context,
           SequenceNumber* max_covering_tombstone_seq,
           const ReadOptions& read_opts, ReadCallback* callback = nullptr,
           bool* is_blob_index = nullptr) {
    SequenceNumber seq;
    return Get(key, value, columns, timestamp, s, merge_context,
               max_covering_tombstone_seq, &seq, read_opts, callback,
               is_blob_index);
  }
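
  // Example (illustrative sketch, not part of the API): a typical point
  // lookup against an immutable memtable version. `version`, `lkey`, and
  // `read_opts` are assumed to be set up by the caller.
  //
  //   std::string value;
  //   PinnableWideColumns columns;
  //   std::string timestamp;
  //   Status s;
  //   MergeContext merge_context;
  //   SequenceNumber max_covering_tombstone_seq = 0;
  //   if (version->Get(lkey, &value, &columns, &timestamp, &s, &merge_context,
  //                    &max_covering_tombstone_seq, read_opts)) {
  //     // A result for this key was found in the immutable memtables;
  //     // `s` and `value` describe it.
  //   }
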
  void MultiGet(const ReadOptions& read_options, MultiGetRange* range,
                ReadCallback* callback);

  // Returns all the merge operands corresponding to the key by searching all
  // memtables starting from the most recent one.
  bool GetMergeOperands(const LookupKey& key, Status* s,
                        MergeContext* merge_context,
                        SequenceNumber* max_covering_tombstone_seq,
                        const ReadOptions& read_opts);

  // Similar to Get(), but searches the Memtable history of memtables that
  // have already been flushed. Should only be used from in-memory only
  // queries (such as Transaction validation) as the history may contain
  // writes that are also present in the SST files.
  bool GetFromHistory(const LookupKey& key, std::string* value,
                      PinnableWideColumns* columns, std::string* timestamp,
                      Status* s, MergeContext* merge_context,
                      SequenceNumber* max_covering_tombstone_seq,
                      SequenceNumber* seq, const ReadOptions& read_opts,
                      bool* is_blob_index = nullptr);
  bool GetFromHistory(const LookupKey& key, std::string* value,
                      PinnableWideColumns* columns, std::string* timestamp,
                      Status* s, MergeContext* merge_context,
                      SequenceNumber* max_covering_tombstone_seq,
                      const ReadOptions& read_opts,
                      bool* is_blob_index = nullptr) {
    SequenceNumber seq;
    return GetFromHistory(key, value, columns, timestamp, s, merge_context,
                          max_covering_tombstone_seq, &seq, read_opts,
                          is_blob_index);
  }

  Status AddRangeTombstoneIterators(const ReadOptions& read_opts, Arena* arena,
                                    RangeDelAggregator* range_del_agg);

  void AddIterators(const ReadOptions& options,
                    UnownedPtr<const SeqnoToTimeMapping> seqno_to_time_mapping,
                    const SliceTransform* prefix_extractor,
                    std::vector<InternalIterator*>* iterator_list,
                    Arena* arena);
  void AddIterators(const ReadOptions& options,
                    UnownedPtr<const SeqnoToTimeMapping> seqno_to_time_mapping,
                    const SliceTransform* prefix_extractor,
                    MergeIteratorBuilder* merge_iter_builder,
                    bool add_range_tombstone_iter);

  uint64_t GetTotalNumEntries() const;
  uint64_t GetTotalNumDeletes() const;

  ReadOnlyMemTable::MemTableStats ApproximateStats(
      const Slice& start_ikey, const Slice& end_ikey) const;

  // Returns the value of MemTable::GetEarliestSequenceNumber() on the most
  // recent MemTable in this list or kMaxSequenceNumber if the list is empty.
  // If include_history=true, will also search Memtables in MemTableList
  // History.
  SequenceNumber GetEarliestSequenceNumber(bool include_history = false) const;

  // Return the first sequence number from the memtable list, which is the
  // smallest sequence number of all FirstSequenceNumber.
  // Return kMaxSequenceNumber if the list is empty.
  SequenceNumber GetFirstSequenceNumber() const;

  // REQUIRES: db_mutex held.
  void SetID(uint64_t id) { id_ = id; }
  uint64_t GetID() const { return id_; }

  int NumNotFlushed() const { return static_cast<int>(memlist_.size()); }
  int NumFlushed() const { return static_cast<int>(memlist_history_.size()); }

  // Gets the newest user-defined timestamp from the immutable memtables,
  // i.e. the newest UDT found in the most recent immutable memtable. This
  // should only be called when user-defined timestamps are enabled.
  const Slice& GetNewestUDT() const;

 private:
  friend class MemTableList;
  friend Status InstallMemtableAtomicFlushResults(
      const autovector<MemTableList*>* imm_lists,
      const autovector<ColumnFamilyData*>& cfds,
      const autovector<const autovector<ReadOnlyMemTable*>*>& mems_list,
      VersionSet* vset, LogsWithPrepTracker* prep_tracker,
      InstrumentedMutex* mu, const autovector<FileMetaData*>& file_meta,
      const autovector<std::list<std::unique_ptr<FlushJobInfo>>*>&
          committed_flush_jobs_info,
      autovector<ReadOnlyMemTable*>* to_delete, FSDirectory* db_directory,
      LogBuffer* log_buffer);

  // REQUIRES: m is an immutable memtable
  void Add(ReadOnlyMemTable* m, autovector<ReadOnlyMemTable*>* to_delete);
  // REQUIRES: m is an immutable memtable
  void Remove(ReadOnlyMemTable* m, autovector<ReadOnlyMemTable*>* to_delete);

  // Return true if the memtable list should be trimmed to get memory usage
  // under budget.
  bool HistoryShouldBeTrimmed(size_t usage);

  // Trim history. Return true if any memtable is trimmed.
  bool TrimHistory(autovector<ReadOnlyMemTable*>* to_delete, size_t usage);

  bool GetFromList(std::list<ReadOnlyMemTable*>* list, const LookupKey& key,
                   std::string* value, PinnableWideColumns* columns,
                   std::string* timestamp, Status* s,
                   MergeContext* merge_context,
                   SequenceNumber* max_covering_tombstone_seq,
                   SequenceNumber* seq, const ReadOptions& read_opts,
                   ReadCallback* callback = nullptr,
                   bool* is_blob_index = nullptr);

  void AddMemTable(ReadOnlyMemTable* m);

  void UnrefMemTable(autovector<ReadOnlyMemTable*>* to_delete,
                     ReadOnlyMemTable* m);

  // Calculate the total amount of memory used by memlist_ and
  // memlist_history_, excluding the last MemTable in memlist_history_. The
  // reason for excluding the last MemTable is to see if dropping it would
  // keep total memory usage above or equal to
  // max_write_buffer_size_to_maintain_.
  size_t MemoryAllocatedBytesExcludingLast() const;

  // Whether this version contains flushed memtables that are only kept around
  // for transaction conflict checking.
  bool HasHistory() const { return !memlist_history_.empty(); }

  bool MemtableLimitExceeded(size_t usage);

  // Immutable MemTables that have not yet been flushed.
  std::list<ReadOnlyMemTable*> memlist_;

  // MemTables that have already been flushed
  // (used during Transaction validation).
  std::list<ReadOnlyMemTable*> memlist_history_;

  // Maximum size of MemTables to keep in memory (including both flushed
  // and not-yet-flushed tables).
  const int64_t max_write_buffer_size_to_maintain_;

  int refs_ = 0;

  size_t* parent_memtable_list_memory_usage_;

  // MemTableListVersion id to track for flush results checking.
  uint64_t id_ = 0;
};
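
// Example (illustrative sketch, assuming the caller holds the db mutex where
// noted): a MemTableListVersion is reference counted, and memtables whose
// last reference is dropped by Unref() are returned through `to_delete` so
// they can be freed outside the mutex.
//
//   MemTableListVersion* version;
//   {
//     InstrumentedMutexLock l(db_mutex);
//     version = imm->current();
//     version->Ref();
//   }
//   // ... read from `version` without holding the mutex ...
//   autovector<ReadOnlyMemTable*> to_delete;
//   {
//     InstrumentedMutexLock l(db_mutex);
//     version->Unref(&to_delete);
//   }
//   for (ReadOnlyMemTable* m : to_delete) {
//     delete m;
//   }
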
// This class stores references to all the immutable memtables.
// The memtables are flushed to L0 as soon as possible and in
// any order. If there is more than one immutable memtable, their
// flushes can occur concurrently. However, they are 'committed'
// to the manifest in FIFO order to maintain correctness and
// recoverability from a crash.
//
// Other than imm_flush_needed and imm_trim_needed, this class is not
// thread-safe and requires external synchronization (such as holding the db
// mutex or being on the write thread.)
class MemTableList {
 public:
  // A list of memtables.
  explicit MemTableList(int min_write_buffer_number_to_merge,
                        int64_t max_write_buffer_size_to_maintain)
      : imm_flush_needed(false),
        imm_trim_needed(false),
        min_write_buffer_number_to_merge_(min_write_buffer_number_to_merge),
        current_(new MemTableListVersion(&current_memory_usage_,
                                         max_write_buffer_size_to_maintain)),
        num_flush_not_started_(0),
        commit_in_progress_(false),
        flush_requested_(false),
        current_memory_usage_(0),
        current_memory_allocted_bytes_excluding_last_(0),
        current_has_history_(false),
        last_memtable_list_version_id_(0) {
    current_->Ref();
  }

  // Should not delete MemTableList without making sure MemTableList::current()
  // is Unref()'d.
  ~MemTableList() {}

  MemTableListVersion* current() const { return current_; }

  // Flags that background threads can check to determine whether there is
  // anything more to start flushing or trimming.
  std::atomic<bool> imm_flush_needed;
  std::atomic<bool> imm_trim_needed;

  // Returns the total number of memtables in the list that haven't yet
  // been flushed and logged.
  int NumNotFlushed() const;

  // Returns total number of memtables in the list that have been
  // completely flushed and logged.
  int NumFlushed() const;

  // Returns true if there is at least one memtable on which flush has
  // not yet started.
  bool IsFlushPending() const;

  // Returns true if there is at least one memtable that is pending flush or
  // flushing.
  bool IsFlushPendingOrRunning() const;

  // Returns the earliest memtables that need to be flushed. The returned
  // memtables are guaranteed to be in ascending order of creation time.
  void PickMemtablesToFlush(uint64_t max_memtable_id,
                            autovector<ReadOnlyMemTable*>* mems,
                            uint64_t* max_next_log_number = nullptr);

  // Reset the status of the given memtables back to the pending state so
  // that they can get picked up again on the next round of flush.
  //
  // @param rollback_succeeding_memtables If true, will roll back adjacent
  // younger memtables whose flush is completed. Specifically, suppose the
  // current immutable memtables are M_0, M_1, ..., M_N ordered from youngest
  // to oldest. Suppose that the youngest memtable in `mems` is M_K. We will
  // try to roll back M_{K-1}, M_{K-2}, ... until the first memtable whose
  // flush is not completed. These are the memtables that would have been
  // installed by this flush job if it were to succeed. This flag is currently
  // used by non-atomic_flush rollback.
  // Note that we also do rollback in `write_manifest_cb` by calling
  // `RemoveMemTablesOrRestoreFlags()`. There we roll back the entire batch,
  // so it is similar to what we do here with
  // rollback_succeeding_memtables=true.
  void RollbackMemtableFlush(const autovector<ReadOnlyMemTable*>& mems,
                             bool rollback_succeeding_memtables);

  // Try to commit a successful flush in the manifest file. It might just
  // return Status::OK letting a concurrent flush do the actual recording.
  Status TryInstallMemtableFlushResults(
      ColumnFamilyData* cfd, const autovector<ReadOnlyMemTable*>& m,
      LogsWithPrepTracker* prep_tracker, VersionSet* vset,
      InstrumentedMutex* mu, uint64_t file_number,
      autovector<ReadOnlyMemTable*>* to_delete, FSDirectory* db_directory,
      LogBuffer* log_buffer,
      std::list<std::unique_ptr<FlushJobInfo>>* committed_flush_jobs_info,
      bool write_edits = true);

  // New memtables are inserted at the front of the list.
  // Takes ownership of the reference held on *m by the caller of Add().
  // By default, adding memtables will flag that the memtable list needs to be
  // flushed, but in certain situations, like after a mempurge, we may want to
  // avoid flushing the memtable list upon addition of a memtable.
  void Add(ReadOnlyMemTable* m, autovector<ReadOnlyMemTable*>* to_delete);

  // Returns an estimate of the number of bytes of data in use.
  size_t ApproximateMemoryUsage();

  // Returns the cached current_memory_allocted_bytes_excluding_last_ value.
  size_t MemoryAllocatedBytesExcludingLast() const;

  // Returns the cached current_has_history_ value.
  bool HasHistory() const;

  // Updates current_memory_allocted_bytes_excluding_last_ and
  // current_has_history_ from MemTableListVersion. Must be called whenever
  // InstallNewVersion is called.
  void UpdateCachedValuesFromMemTableListVersion();

  // `usage` is the current size of the mutable Memtable. When
  // max_write_buffer_size_to_maintain is used, the total size of the mutable
  // and immutable memtables is checked against it to decide whether to trim
  // the memtable list.
  //
  // Return true if any memtable is trimmed.
  bool TrimHistory(autovector<ReadOnlyMemTable*>* to_delete, size_t usage);

  // Returns an estimate of the number of bytes of data used by
  // the unflushed mem-tables.
  size_t ApproximateUnflushedMemTablesMemoryUsage();

  // Returns an estimate of the timestamp of the earliest key.
  uint64_t ApproximateOldestKeyTime() const;

  // Request a flush of all existing memtables to storage. This will
  // cause future calls to IsFlushPending() to return true if this list is
  // non-empty (regardless of the min_write_buffer_number_to_merge
  // parameter). This flush request will persist until the next time
  // PickMemtablesToFlush() is called.
  void FlushRequested() {
    flush_requested_ = true;
    // If there are some memtables stored in imm() that don't trigger a
    // flush (e.g. mempurge output memtables), then update imm_flush_needed.
    // Note: if, due to a race condition, imm_flush_needed is set to true
    // while num_flush_not_started_ == 0, there is no impact whatsoever.
    // imm_flush_needed is only used in an assert in IsFlushPending().
    if (num_flush_not_started_ > 0) {
      imm_flush_needed.store(true, std::memory_order_release);
    }
  }
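
  // Example (illustrative; `ScheduleFlush` is a hypothetical scheduler
  // hook): a manual flush marks the whole list as requested before
  // scheduling background work.
  //
  //   imm->FlushRequested();
  //   if (imm->IsFlushPending()) {
  //     ScheduleFlush(cfd);  // hypothetical
  //   }
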
  bool HasFlushRequested() { return flush_requested_; }

  // Returns true if a trim history should be scheduled and the caller should
  // be the one to schedule it.
  bool MarkTrimHistoryNeeded() {
    auto expected = false;
    return imm_trim_needed.compare_exchange_strong(
        expected, true, std::memory_order_relaxed, std::memory_order_relaxed);
  }

  void ResetTrimHistoryNeeded() {
    auto expected = true;
    imm_trim_needed.compare_exchange_strong(
        expected, false, std::memory_order_relaxed, std::memory_order_relaxed);
  }
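
  // Example (illustrative): the caller that wins the compare-and-swap in
  // MarkTrimHistoryNeeded() schedules the trim exactly once; the background
  // job resets the flag and then trims. `ScheduleTrimHistory` is a
  // hypothetical scheduler hook.
  //
  //   if (imm->MarkTrimHistoryNeeded()) {
  //     ScheduleTrimHistory(cfd);  // hypothetical
  //   }
  //   // Later, in the background job, with the db mutex held:
  //   imm->ResetTrimHistoryNeeded();
  //   imm->TrimHistory(&to_delete, mutable_memtable_size);
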
  // Copying allowed
  // MemTableList(const MemTableList&);
  // void operator=(const MemTableList&);

  size_t* current_memory_usage() { return &current_memory_usage_; }

  // Returns the WAL number of the oldest WAL that contains a prepared
  // transaction that corresponds to the content in this MemTableList,
  // after the memtables listed in `memtables_to_flush` are flushed and their
  // status is persisted in the manifest.
  uint64_t PrecomputeMinLogContainingPrepSection(
      const std::unordered_set<ReadOnlyMemTable*>* memtables_to_flush =
          nullptr) const;

  uint64_t GetEarliestMemTableID() const {
    auto& memlist = current_->memlist_;
    if (memlist.empty()) {
      return std::numeric_limits<uint64_t>::max();
    }
    return memlist.back()->GetID();
  }

  uint64_t GetLatestMemTableID(bool for_atomic_flush) const {
    auto& memlist = current_->memlist_;
    if (memlist.empty()) {
      return 0;
    }
    if (for_atomic_flush) {
      // Scan the memtable list from new to old.
      for (auto it = memlist.begin(); it != memlist.end(); ++it) {
        ReadOnlyMemTable* m = *it;
        if (m->atomic_flush_seqno_ != kMaxSequenceNumber) {
          return m->GetID();
        }
      }
      return 0;
    }
    return memlist.front()->GetID();
  }

  // DB mutex held.
  // Gets the newest user-defined timestamps of the MemTables in ascending ID
  // order, up to `max_memtable_id`. Used by the background flush job
  // to check the Memtables' eligibility for flush w.r.t. retaining UDTs.
  std::vector<Slice> GetTablesNewestUDT(uint64_t max_memtable_id) {
    std::vector<Slice> newest_udts;
    auto& memlist = current_->memlist_;
    // Iterating through the memlist starting at the end, `newest_udts` is
    // filled with the newest UDTs of memtables already sorted in increasing
    // MemTable ID order.
    for (auto it = memlist.rbegin(); it != memlist.rend(); ++it) {
      ReadOnlyMemTable* m = *it;
      if (m->GetID() > max_memtable_id) {
        break;
      }
      newest_udts.push_back(m->GetNewestUDT());
    }
    return newest_udts;
  }

  void AssignAtomicFlushSeq(const SequenceNumber& seq) {
    const auto& memlist = current_->memlist_;
    // Scan the memtable list from new to old.
    for (auto it = memlist.begin(); it != memlist.end(); ++it) {
      ReadOnlyMemTable* mem = *it;
      if (mem->atomic_flush_seqno_ == kMaxSequenceNumber) {
        mem->atomic_flush_seqno_ = seq;
      } else {
        // Earlier memtables must have been assigned an atomic flush seq; no
        // need to continue the scan.
        break;
      }
    }
  }
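
  // Example (illustrative sketch of atomic flush tagging; `versions` and
  // `cfds_to_flush` are assumed caller state, and `imm()` is assumed to
  // return the column family's MemTableList): every participating column
  // family is tagged with the same sequence number, so the tagged memtables
  // can later be identified with
  // GetLatestMemTableID(true /* for_atomic_flush */).
  //
  //   SequenceNumber flush_seq = versions->LastSequence();
  //   for (ColumnFamilyData* cfd : cfds_to_flush) {
  //     cfd->imm()->AssignAtomicFlushSeq(flush_seq);
  //   }
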
  // Used only by DBImplSecondary during log replay.
  // Remove memtables whose data were written before the WAL with log_number
  // was created, i.e. mem->GetNextLogNumber() <= log_number. The memtables
  // are not freed, but put into a vector for future deref and reclamation.
  void RemoveOldMemTables(uint64_t log_number,
                          autovector<ReadOnlyMemTable*>* to_delete);

  // This API is only used by atomic data replacement, to get an edit for
  // dropping the current `MemTableListVersion`.
  VersionEdit GetEditForDroppingCurrentVersion(
      const ColumnFamilyData* cfd, VersionSet* vset,
      LogsWithPrepTracker* prep_tracker) const;

 private:
  friend Status InstallMemtableAtomicFlushResults(
      const autovector<MemTableList*>* imm_lists,
      const autovector<ColumnFamilyData*>& cfds,
      const autovector<const autovector<ReadOnlyMemTable*>*>& mems_list,
      VersionSet* vset, LogsWithPrepTracker* prep_tracker,
      InstrumentedMutex* mu, const autovector<FileMetaData*>& file_meta,
      const autovector<std::list<std::unique_ptr<FlushJobInfo>>*>&
          committed_flush_jobs_info,
      autovector<ReadOnlyMemTable*>* to_delete, FSDirectory* db_directory,
      LogBuffer* log_buffer);

  // DB mutex held.
  void InstallNewVersion();

  // DB mutex held.
  // Called after writing to MANIFEST.
  void RemoveMemTablesOrRestoreFlags(const Status& s, ColumnFamilyData* cfd,
                                     size_t batch_count, LogBuffer* log_buffer,
                                     autovector<ReadOnlyMemTable*>* to_delete,
                                     InstrumentedMutex* mu);

  const int min_write_buffer_number_to_merge_;

  MemTableListVersion* current_;

  // The number of elements that still need flushing.
  int num_flush_not_started_;

  // Committing in progress.
  bool commit_in_progress_;

  // Requested a flush of memtables to storage. It's possible to request that
  // a subset of memtables be flushed.
  bool flush_requested_;

  // The current memory usage.
  size_t current_memory_usage_;

  // Cached value of current_->MemoryAllocatedBytesExcludingLast().
  std::atomic<size_t> current_memory_allocted_bytes_excluding_last_;

  // Cached value of current_->HasHistory().
  std::atomic<bool> current_has_history_;

  // Last memtable list version id, increased by 1 each time a new
  // MemTableListVersion is installed.
  uint64_t last_memtable_list_version_id_;
};
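
// Example (illustrative sketch of the single-CF flush workflow, error
// handling and job setup omitted): memtables are picked under the db mutex,
// written to an SST outside the mutex, and then either committed to the
// manifest or rolled back.
//
//   autovector<ReadOnlyMemTable*> mems;
//   imm->PickMemtablesToFlush(max_memtable_id, &mems);
//   // ... build SST `file_number` from `mems` without holding the mutex ...
//   if (s.ok()) {
//     s = imm->TryInstallMemtableFlushResults(
//         cfd, mems, prep_tracker, vset, mu, file_number, &to_delete,
//         db_directory, log_buffer, &committed_flush_jobs_info);
//   } else {
//     imm->RollbackMemtableFlush(mems,
//                                /*rollback_succeeding_memtables=*/false);
//   }
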
// Installs memtable atomic flush results.
// In most cases, imm_lists is nullptr, and the function simply uses the
// immutable memtable lists associated with the cfds. There are unit tests
// that install flush results for external immutable memtable lists other
// than the cfds' own immutable memtable lists, e.g. MemTableListTest. In
// this case, the imm_lists parameter is not nullptr.
Status InstallMemtableAtomicFlushResults(
    const autovector<MemTableList*>* imm_lists,
    const autovector<ColumnFamilyData*>& cfds,
    const autovector<const autovector<ReadOnlyMemTable*>*>& mems_list,
    VersionSet* vset, LogsWithPrepTracker* prep_tracker, InstrumentedMutex* mu,
    const autovector<FileMetaData*>& file_meta,
    const autovector<std::list<std::unique_ptr<FlushJobInfo>>*>&
        committed_flush_jobs_info,
    autovector<ReadOnlyMemTable*>* to_delete, FSDirectory* db_directory,
    LogBuffer* log_buffer);

}  // namespace ROCKSDB_NAMESPACE