compaction.cc 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145
  1. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
  2. // This source code is licensed under both the GPLv2 (found in the
  3. // COPYING file in the root directory) and Apache 2.0 License
  4. // (found in the LICENSE.Apache file in the root directory).
  5. //
  6. // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
  7. // Use of this source code is governed by a BSD-style license that can be
  8. // found in the LICENSE file. See the AUTHORS file for names of contributors.
  9. #include "db/compaction/compaction.h"
  10. #include <cinttypes>
  11. #include <vector>
  12. #include "db/column_family.h"
  13. #include "db/dbformat.h"
  14. #include "logging/logging.h"
  15. #include "rocksdb/compaction_filter.h"
  16. #include "rocksdb/sst_partitioner.h"
  17. #include "test_util/sync_point.h"
  18. #include "util/string_util.h"
  19. namespace ROCKSDB_NAMESPACE {
  20. int sstableKeyCompare(const Comparator* uc, const Slice& a, const Slice& b) {
  21. auto c = uc->CompareWithoutTimestamp(ExtractUserKey(a), ExtractUserKey(b));
  22. if (c != 0) {
  23. return c;
  24. }
  25. auto a_footer = ExtractInternalKeyFooter(a);
  26. auto b_footer = ExtractInternalKeyFooter(b);
  27. if (a_footer == kRangeTombstoneSentinel) {
  28. if (b_footer != kRangeTombstoneSentinel) {
  29. return -1;
  30. }
  31. } else if (b_footer == kRangeTombstoneSentinel) {
  32. return 1;
  33. }
  34. return 0;
  35. }
  36. int sstableKeyCompare(const Comparator* user_cmp, const InternalKey* a,
  37. const InternalKey& b) {
  38. if (a == nullptr) {
  39. return -1;
  40. }
  41. return sstableKeyCompare(user_cmp, *a, b);
  42. }
  43. int sstableKeyCompare(const Comparator* user_cmp, const InternalKey& a,
  44. const InternalKey* b) {
  45. if (b == nullptr) {
  46. return -1;
  47. }
  48. return sstableKeyCompare(user_cmp, a, *b);
  49. }
  50. uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
  51. uint64_t sum = 0;
  52. for (size_t i = 0; i < files.size() && files[i]; i++) {
  53. sum += files[i]->fd.GetFileSize();
  54. }
  55. return sum;
  56. }
// TODO(hx235): consider making this function part of the construction so we
// don't forget to call it
//
// Binds this compaction to the version it reads from and to that
// version's column family, taking a reference on each so they stay alive
// for the compaction's duration (released in ~Compaction()). Also stamps
// the pending VersionEdit with the column family id.
void Compaction::FinalizeInputInfo(Version* _input_version) {
  input_version_ = _input_version;
  cfd_ = input_version_->cfd();
  // Hold refs for the lifetime of this Compaction object.
  cfd_->Ref();
  input_version_->Ref();
  edit_.SetColumnFamily(cfd_->GetID());
}
// Computes the smallest and largest user keys covered by `inputs`,
// skipping empty levels and the optional `exclude_level`. For L0 every
// file must be examined (L0 files may overlap each other); for L > 0 the
// files are sorted and non-overlapping, so only the first and last file
// of the level matter. The output Slices alias key storage owned by the
// FileMetaData objects in `inputs`, so they must not outlive them.
void Compaction::GetBoundaryKeys(
    VersionStorageInfo* vstorage,
    const std::vector<CompactionInputFiles>& inputs, Slice* smallest_user_key,
    Slice* largest_user_key, int exclude_level) {
  // `initialized` distinguishes "no boundary seen yet" from a real
  // comparison; the outputs are unconditionally overwritten on the first
  // candidate.
  bool initialized = false;
  const Comparator* ucmp = vstorage->InternalComparator()->user_comparator();
  for (size_t i = 0; i < inputs.size(); ++i) {
    if (inputs[i].files.empty() || inputs[i].level == exclude_level) {
      continue;
    }
    if (inputs[i].level == 0) {
      // we need to consider all files on level 0
      for (const auto* f : inputs[i].files) {
        const Slice& start_user_key = f->smallest.user_key();
        if (!initialized ||
            ucmp->Compare(start_user_key, *smallest_user_key) < 0) {
          *smallest_user_key = start_user_key;
        }
        const Slice& end_user_key = f->largest.user_key();
        if (!initialized ||
            ucmp->Compare(end_user_key, *largest_user_key) > 0) {
          *largest_user_key = end_user_key;
        }
        initialized = true;
      }
    } else {
      // we only need to consider the first and last file
      const Slice& start_user_key = inputs[i].files[0]->smallest.user_key();
      if (!initialized ||
          ucmp->Compare(start_user_key, *smallest_user_key) < 0) {
        *smallest_user_key = start_user_key;
      }
      const Slice& end_user_key = inputs[i].files.back()->largest.user_key();
      if (!initialized || ucmp->Compare(end_user_key, *largest_user_key) > 0) {
        *largest_user_key = end_user_key;
      }
      initialized = true;
    }
  }
}
// Like GetBoundaryKeys(), but computes full internal-key bounds (user key
// plus sequence/type footer) using the internal key comparator, and copies
// the keys into the output InternalKey objects rather than aliasing them.
// Levels that are empty or equal to `exclude_level` are skipped.
void Compaction::GetBoundaryInternalKeys(
    VersionStorageInfo* vstorage,
    const std::vector<CompactionInputFiles>& inputs, InternalKey* smallest_key,
    InternalKey* largest_key, int exclude_level) {
  // Outputs are only written once a candidate file has been seen; if every
  // level is skipped, *smallest_key/*largest_key are left untouched.
  bool initialized = false;
  const InternalKeyComparator* icmp = vstorage->InternalComparator();
  for (size_t i = 0; i < inputs.size(); ++i) {
    if (inputs[i].files.empty() || inputs[i].level == exclude_level) {
      continue;
    }
    if (inputs[i].level == 0) {
      // we need to consider all files on level 0
      for (const auto* f : inputs[i].files) {
        if (!initialized || icmp->Compare(f->smallest, *smallest_key) < 0) {
          *smallest_key = f->smallest;
        }
        if (!initialized || icmp->Compare(f->largest, *largest_key) > 0) {
          *largest_key = f->largest;
        }
        initialized = true;
      }
    } else {
      // we only need to consider the first and last file
      if (!initialized ||
          icmp->Compare(inputs[i].files[0]->smallest, *smallest_key) < 0) {
        *smallest_key = inputs[i].files[0]->smallest;
      }
      if (!initialized ||
          icmp->Compare(inputs[i].files.back()->largest, *largest_key) > 0) {
        *largest_key = inputs[i].files.back()->largest;
      }
      initialized = true;
    }
  }
}
// For each non-L0 input level, assigns every file its "atomic compaction
// unit" boundary: a maximal run of adjacent files whose boundary keys chain
// together under sstableKeyCompare (previous file's largest == next file's
// smallest at boundary granularity). All files in a unit share the same
// {smallest, largest} pointer pair. L0 and empty levels are left untouched.
// Takes and returns `inputs` by value so it can be used in an initializer.
std::vector<CompactionInputFiles> Compaction::PopulateWithAtomicBoundaries(
    VersionStorageInfo* vstorage, std::vector<CompactionInputFiles> inputs) {
  const Comparator* ucmp = vstorage->InternalComparator()->user_comparator();
  for (size_t i = 0; i < inputs.size(); i++) {
    if (inputs[i].level == 0 || inputs[i].files.empty()) {
      continue;
    }
    inputs[i].atomic_compaction_unit_boundaries.reserve(inputs[i].files.size());
    AtomicCompactionUnitBoundary cur_boundary;
    size_t first_atomic_idx = 0;
    // Pushes `cur_boundary` once for every file in [first_atomic_idx, to),
    // i.e. stamps the finished unit onto each of its member files.
    auto add_unit_boundary = [&](size_t to) {
      if (first_atomic_idx == to) {
        return;
      }
      for (size_t k = first_atomic_idx; k < to; k++) {
        inputs[i].atomic_compaction_unit_boundaries.push_back(cur_boundary);
      }
      first_atomic_idx = to;
    };
    for (size_t j = 0; j < inputs[i].files.size(); j++) {
      const auto* f = inputs[i].files[j];
      if (j == 0) {
        // First file in a level.
        cur_boundary.smallest = &f->smallest;
        cur_boundary.largest = &f->largest;
      } else if (sstableKeyCompare(ucmp, *cur_boundary.largest, f->smallest) ==
                 0) {
        // SSTs overlap but the end key of the previous file was not
        // artificially extended by a range tombstone. Extend the current
        // boundary.
        cur_boundary.largest = &f->largest;
      } else {
        // Atomic compaction unit has ended.
        add_unit_boundary(j);
        cur_boundary.smallest = &f->smallest;
        cur_boundary.largest = &f->largest;
      }
    }
    // Flush the trailing unit.
    add_unit_boundary(inputs[i].files.size());
    assert(inputs[i].files.size() ==
           inputs[i].atomic_compaction_unit_boundaries.size());
  }
  return inputs;
}
// helper function to determine if compaction is creating files at the
// bottommost level
bool Compaction::IsBottommostLevel(
    int output_level, VersionStorageInfo* vstorage,
    const std::vector<CompactionInputFiles>& inputs) {
  int output_l0_idx;
  if (output_level == 0) {
    // Outputting to L0: find the position of the last picked L0 input
    // file within the level's file list; RangeMightExistAfterSortedRun
    // uses it to decide which L0 files count as "after" the output.
    output_l0_idx = 0;
    for (const auto* file : vstorage->LevelFiles(0)) {
      if (inputs[0].files.back() == file) {
        break;
      }
      ++output_l0_idx;
    }
    // The last input file must be present in the version's L0 list.
    assert(static_cast<size_t>(output_l0_idx) < vstorage->LevelFiles(0).size());
  } else {
    // Sentinel: index is irrelevant when not outputting to L0.
    output_l0_idx = -1;
  }
  Slice smallest_key, largest_key;
  GetBoundaryKeys(vstorage, inputs, &smallest_key, &largest_key);
  // Bottommost iff no key in [smallest_key, largest_key] might exist in
  // any sorted run after the output position.
  return !vstorage->RangeMightExistAfterSortedRun(smallest_key, largest_key,
                                                  output_level, output_l0_idx);
}
// test function to validate the functionality of IsBottommostLevel()
// function -- determines if compaction with inputs and storage is bottommost
bool Compaction::TEST_IsBottommostLevel(
    int output_level, VersionStorageInfo* vstorage,
    const std::vector<CompactionInputFiles>& inputs) {
  // Thin test-only wrapper exposing the private helper above.
  return IsBottommostLevel(output_level, vstorage, inputs);
}
  215. bool Compaction::IsFullCompaction(
  216. VersionStorageInfo* vstorage,
  217. const std::vector<CompactionInputFiles>& inputs) {
  218. size_t num_files_in_compaction = 0;
  219. size_t total_num_files = 0;
  220. for (int l = 0; l < vstorage->num_levels(); l++) {
  221. total_num_files += vstorage->NumLevelFiles(l);
  222. }
  223. for (size_t i = 0; i < inputs.size(); i++) {
  224. num_files_in_compaction += inputs[i].size();
  225. }
  226. return num_files_in_compaction == total_num_files;
  227. }
// Lazily loads the table properties of every input file into
// input_table_properties_, keyed by file name. Returns OK immediately if
// the map is already populated. On any per-file failure the error is
// logged, the partially-filled map is cleared, and the failing status is
// returned, so the cache is never left half-initialized.
Status Compaction::InitInputTableProperties() {
  if (!input_table_properties_.empty()) {
    // Already initialized by a previous call.
    return Status::OK();
  }
  Status s;
  const ReadOptions read_options(Env::IOActivity::kCompaction);
  assert(input_version_);
  for (size_t i = 0; i < num_input_levels(); ++i) {
    for (const FileMetaData* fmd : *(this->inputs(i))) {
      std::shared_ptr<const TableProperties> tp;
      std::string file_name =
          TableFileName(immutable_options_.cf_paths, fmd->fd.GetNumber(),
                        fmd->fd.GetPathId());
      s = input_version_->GetTableProperties(read_options, &tp, fmd,
                                             &file_name);
      if (s.ok()) {
        input_table_properties_[file_name] = tp;
      } else {
        ROCKS_LOG_ERROR(immutable_options_.info_log,
                        "Unable to load table properties for file %" PRIu64
                        " --- %s\n",
                        fmd->fd.GetNumber(), s.ToString().c_str());
        // Leave no partial state behind on failure.
        input_table_properties_.clear();
        return s;
      }
    }
  }
  return s;
}
// Constructs a fully-specified compaction job. Note the member
// initialization order dependencies: inputs_ is built (with atomic
// boundaries) before bottommost_level_/is_full_compaction_ consume it,
// and start_level_ is read from _inputs[0] before _inputs is moved.
// Marks all input files as being_compacted; cleared again via
// ReleaseCompactionFiles().
Compaction::Compaction(
    VersionStorageInfo* vstorage, const ImmutableOptions& _immutable_options,
    const MutableCFOptions& _mutable_cf_options,
    const MutableDBOptions& _mutable_db_options,
    std::vector<CompactionInputFiles> _inputs, int _output_level,
    uint64_t _target_file_size, uint64_t _max_compaction_bytes,
    uint32_t _output_path_id, CompressionType _compression,
    CompressionOptions _compression_opts,
    Temperature _output_temperature_override, uint32_t _max_subcompactions,
    std::vector<FileMetaData*> _grandparents,
    std::optional<SequenceNumber> _earliest_snapshot,
    const SnapshotChecker* _snapshot_checker,
    CompactionReason _compaction_reason, const std::string& _trim_ts,
    double _score, bool l0_files_might_overlap,
    BlobGarbageCollectionPolicy _blob_garbage_collection_policy,
    double _blob_garbage_collection_age_cutoff)
    : input_vstorage_(vstorage),
      start_level_(_inputs[0].level),
      output_level_(_output_level),
      target_output_file_size_(_target_file_size),
      max_compaction_bytes_(_max_compaction_bytes),
      max_subcompactions_(_max_subcompactions),
      immutable_options_(_immutable_options),
      mutable_cf_options_(_mutable_cf_options),
      input_version_(nullptr),
      number_levels_(vstorage->num_levels()),
      cfd_(nullptr),
      output_path_id_(_output_path_id),
      output_compression_(_compression),
      output_compression_opts_(_compression_opts),
      output_temperature_override_(_output_temperature_override),
      // FIFO TTL / max-size compactions are flagged as deletion compactions.
      deletion_compaction_(_compaction_reason == CompactionReason::kFIFOTtl ||
                           _compaction_reason ==
                               CompactionReason::kFIFOMaxSize),
      l0_files_might_overlap_(l0_files_might_overlap),
      inputs_(PopulateWithAtomicBoundaries(vstorage, std::move(_inputs))),
      grandparents_(std::move(_grandparents)),
      earliest_snapshot_(_earliest_snapshot),
      snapshot_checker_(_snapshot_checker),
      score_(_score),
      bottommost_level_(
          // For simplicity, we don't support the concept of "bottommost level"
          // with
          // `CompactionReason::kExternalSstIngestion` and
          // `CompactionReason::kRefitLevel`
          (_compaction_reason == CompactionReason::kExternalSstIngestion ||
           _compaction_reason == CompactionReason::kRefitLevel)
              ? false
              : IsBottommostLevel(output_level_, vstorage, inputs_)),
      is_full_compaction_(IsFullCompaction(vstorage, inputs_)),
      is_manual_compaction_(_compaction_reason ==
                            CompactionReason::kManualCompaction),
      trim_ts_(_trim_ts),
      is_trivial_move_(false),
      compaction_reason_(_compaction_reason),
      notify_on_compaction_completion_(false),
      // kForce / kDisable override the CF option; any other policy defers
      // to mutable_cf_options().enable_blob_garbage_collection.
      enable_blob_garbage_collection_(
          _blob_garbage_collection_policy == BlobGarbageCollectionPolicy::kForce
              ? true
              : (_blob_garbage_collection_policy ==
                         BlobGarbageCollectionPolicy::kDisable
                     ? false
                     : mutable_cf_options().enable_blob_garbage_collection)),
      // An out-of-range cutoff (outside [0, 1]) falls back to the CF option.
      blob_garbage_collection_age_cutoff_(
          _blob_garbage_collection_age_cutoff < 0 ||
                  _blob_garbage_collection_age_cutoff > 1
              ? mutable_cf_options().blob_garbage_collection_age_cutoff
              : _blob_garbage_collection_age_cutoff),
      proximal_level_(
          // For simplicity, we don't support the concept of "proximal level"
          // with `CompactionReason::kExternalSstIngestion` and
          // `CompactionReason::kRefitLevel`
          _compaction_reason == CompactionReason::kExternalSstIngestion ||
                  _compaction_reason == CompactionReason::kRefitLevel
              ? Compaction::kInvalidLevel
              : EvaluateProximalLevel(vstorage, mutable_cf_options_,
                                      immutable_options_, start_level_,
                                      output_level_)) {
  MarkFilesBeingCompacted(true);
  if (max_subcompactions_ == 0) {
    // 0 means "use the DB-wide setting".
    max_subcompactions_ = _mutable_db_options.max_subcompactions;
  }
  // for the non-bottommost levels, it tries to build files match the target
  // file size, but not guaranteed. It could be 2x the size of the target size.
  max_output_file_size_ = bottommost_level_ || grandparents_.empty()
                              ? target_output_file_size_
                              : 2 * target_output_file_size_;
#ifndef NDEBUG
  // Input levels must be listed in strictly increasing level order.
  for (size_t i = 1; i < inputs_.size(); ++i) {
    assert(inputs_[i].level > inputs_[i - 1].level);
  }
#endif
  // setup input_levels_ and filtered_input_levels_
  {
    input_levels_.resize(num_input_levels());
    filtered_input_levels_.resize(num_input_levels());
    if (earliest_snapshot_.has_value()) {
      // NOTE(review): with a known earliest snapshot, some inputs are
      // presumably filtered out of the compaction iterator — see
      // FilterInputsForCompactionIterator for the actual criteria.
      FilterInputsForCompactionIterator();
    } else {
      for (size_t which = 0; which < num_input_levels(); which++) {
        DoGenerateLevelFilesBrief(&input_levels_[which], inputs_[which].files,
                                  &arena_);
      }
    }
  }
  GetBoundaryKeys(vstorage, inputs_, &smallest_user_key_, &largest_user_key_);
  // Every compaction regardless of any compaction reason may respect the
  // existing compact cursor in the output level to split output files
  output_split_key_ = nullptr;
  if (immutable_options_.compaction_style == kCompactionStyleLevel &&
      immutable_options_.compaction_pri == kRoundRobin) {
    const InternalKey* cursor =
        &input_vstorage_->GetCompactCursors()[output_level_];
    if (cursor->size() != 0) {
      const Slice& cursor_user_key = ExtractUserKey(cursor->Encode());
      auto ucmp = vstorage->InternalComparator()->user_comparator();
      // May split output files according to the cursor if it in the user-key
      // range
      if (ucmp->CompareWithoutTimestamp(cursor_user_key, smallest_user_key_) >
              0 &&
          ucmp->CompareWithoutTimestamp(cursor_user_key, largest_user_key_) <=
              0) {
        output_split_key_ = cursor;
      }
    }
  }
  PopulateProximalLevelOutputRange();
}
// Computes proximal_level_smallest_/proximal_level_largest_ (the key range
// eligible for per-key placement into the proximal level) and decides
// keep_in_last_level_through_seqno_. No-op when per-key placement is
// unsupported for this compaction.
void Compaction::PopulateProximalLevelOutputRange() {
  if (!SupportsPerKeyPlacement()) {
    assert(keep_in_last_level_through_seqno_ == kMaxSequenceNumber);
    return;
  }
  // exclude the last level, the range of all input levels is the safe range
  // of keys that can be moved up.
  int exclude_level = number_levels_ - 1;
  proximal_output_range_type_ = ProximalOutputRangeType::kNonLastRange;
  // For universal compaction, the proximal_output_range could be extended if
  // all proximal level files are included in the compaction (which includes
  // the case that the proximal level is empty).
  if (immutable_options_.compaction_style == kCompactionStyleUniversal) {
    exclude_level = kInvalidLevel;
    proximal_output_range_type_ = ProximalOutputRangeType::kFullRange;
    // Collect the file numbers of the proximal-level inputs ...
    std::set<uint64_t> proximal_inputs;
    for (const auto& input_lvl : inputs_) {
      if (input_lvl.level == proximal_level_) {
        for (const auto& file : input_lvl.files) {
          proximal_inputs.emplace(file->fd.GetNumber());
        }
      }
    }
    // ... and fall back to the restricted range if any live proximal-level
    // file is NOT part of this compaction.
    auto proximal_files = input_vstorage_->LevelFiles(proximal_level_);
    for (const auto& file : proximal_files) {
      if (proximal_inputs.find(file->fd.GetNumber()) == proximal_inputs.end()) {
        exclude_level = number_levels_ - 1;
        proximal_output_range_type_ = ProximalOutputRangeType::kNonLastRange;
        break;
      }
    }
  }
  // FIXME: should make use of `proximal_output_range_type_`.
  // FIXME: when last level's input range does not overlap with
  // proximal level, and proximal level input is empty,
  // this call will not set proximal_level_smallest_ or
  // proximal_level_largest_. No keys will be compacted up.
  GetBoundaryInternalKeys(input_vstorage_, inputs_, &proximal_level_smallest_,
                          &proximal_level_largest_, exclude_level);
  if (proximal_output_range_type_ != ProximalOutputRangeType::kFullRange) {
    // If not full range in proximal level, must keep everything already
    // in the last level there, because moving it back up might cause
    // overlap/placement issues that are difficult to resolve properly in the
    // presence of range deletes
    SequenceNumber max_last_level_seqno = 0;
    for (const auto& input_lvl : inputs_) {
      if (input_lvl.level == output_level_) {
        for (const auto& file : input_lvl.files) {
          max_last_level_seqno =
              std::max(max_last_level_seqno, file->fd.largest_seqno);
        }
      }
    }
    keep_in_last_level_through_seqno_ = max_last_level_seqno;
  } else {
    keep_in_last_level_through_seqno_ = 0;
  }
}
// Releases the version / column-family references acquired in
// FinalizeInputInfo(). Either pointer may still be null if
// FinalizeInputInfo() was never called on this compaction.
Compaction::~Compaction() {
  if (input_version_ != nullptr) {
    input_version_->Unref();
  }
  if (cfd_ != nullptr) {
    cfd_->UnrefAndTryDelete();
  }
}
  451. bool Compaction::SupportsPerKeyPlacement() const {
  452. return proximal_level_ != kInvalidLevel;
  453. }
// Accessor for the proximal output level; kInvalidLevel when per-key
// placement is not supported (see SupportsPerKeyPlacement()).
int Compaction::GetProximalLevel() const { return proximal_level_; }
  455. // smallest_key and largest_key include timestamps if user-defined timestamp is
  456. // enabled.
  457. bool Compaction::OverlapProximalLevelOutputRange(
  458. const Slice& smallest_key, const Slice& largest_key) const {
  459. if (!SupportsPerKeyPlacement()) {
  460. return false;
  461. }
  462. // See FIXME in Compaction::PopulateProximalLevelOutputRange().
  463. // We do not compact any key up in this case.
  464. if (proximal_level_smallest_.size() == 0 ||
  465. proximal_level_largest_.size() == 0) {
  466. return false;
  467. }
  468. const Comparator* ucmp =
  469. input_vstorage_->InternalComparator()->user_comparator();
  470. return ucmp->CompareWithoutTimestamp(
  471. smallest_key, proximal_level_largest_.user_key()) <= 0 &&
  472. ucmp->CompareWithoutTimestamp(
  473. largest_key, proximal_level_smallest_.user_key()) >= 0;
  474. }
// key includes timestamp if user-defined timestamp is enabled.
// Debug-only check that `user_key` lies within the proximal-level output
// range; with expect_failure=true the caller asserts it does NOT. Compiles
// to a no-op in release (NDEBUG) builds.
void Compaction::TEST_AssertWithinProximalLevelOutputRange(
    const Slice& user_key, bool expect_failure) const {
#ifdef NDEBUG
  (void)user_key;
  (void)expect_failure;
#else
  assert(SupportsPerKeyPlacement());
  assert(proximal_level_smallest_.size() > 0);
  assert(proximal_level_largest_.size() > 0);
  auto* cmp = input_vstorage_->user_comparator();
  // op_type of a key can change during compaction, e.g. Merge -> Put.
  if (!(cmp->Compare(user_key, proximal_level_smallest_.user_key()) >= 0)) {
    // Below the range's lower bound.
    assert(expect_failure);
  } else if (!(cmp->Compare(user_key, proximal_level_largest_.user_key()) <=
               0)) {
    // Above the range's upper bound.
    assert(expect_failure);
  } else {
    assert(!expect_failure);
  }
#endif
}
  497. bool Compaction::InputCompressionMatchesOutput() const {
  498. int base_level = input_vstorage_->base_level();
  499. bool matches =
  500. (GetCompressionType(input_vstorage_, mutable_cf_options_, start_level_,
  501. base_level) == output_compression_);
  502. if (matches) {
  503. TEST_SYNC_POINT("Compaction::InputCompressionMatchesOutput:Matches");
  504. return true;
  505. }
  506. TEST_SYNC_POINT("Compaction::InputCompressionMatchesOutput:DidntMatch");
  507. return matches;
  508. }
// Decides whether this compaction can be executed as a trivial move:
// re-linking the input file(s) into the output level without reading or
// rewriting any data. Any condition that requires inspecting the data
// (compaction filter, overlapping L0 inputs, compression or temperature
// change, per-key placement, too much grandparent overlap, SST partitioner
// veto) disqualifies the move.
bool Compaction::IsTrivialMove() const {
  // Avoid a move if there is lots of overlapping grandparent data.
  // Otherwise, the move could create a parent file that will require
  // a very expensive merge later on.
  // If start_level_== output_level_, the purpose is to force compaction
  // filter to be applied to that level, and thus cannot be a trivial move.
  // Check if start level have files with overlapping ranges
  if (start_level_ == 0 && input_vstorage_->level0_non_overlapping() == false &&
      l0_files_might_overlap_) {
    // We cannot move files from L0 to L1 if the L0 files in the LSM-tree are
    // overlapping, unless we are sure that files picked in L0 don't overlap.
    return false;
  }
  if (is_manual_compaction_ &&
      (immutable_options_.compaction_filter != nullptr ||
       immutable_options_.compaction_filter_factory != nullptr)) {
    // This is a manual compaction and we have a compaction filter that should
    // be executed, we cannot do a trivial move
    return false;
  }
  if (start_level_ == output_level_) {
    // It doesn't make sense if compaction picker picks files just to trivial
    // move to the same level.
    return false;
  }
  if (compaction_reason_ == CompactionReason::kChangeTemperature) {
    // Changing temperature usually requires rewriting the file.
    return false;
  }
  // Used in universal compaction, where trivial move can be done if the
  // input files are non overlapping
  if ((mutable_cf_options_.compaction_options_universal.allow_trivial_move) &&
      (output_level_ != 0) &&
      (cfd_->ioptions().compaction_style == kCompactionStyleUniversal)) {
    return is_trivial_move_;
  }
  // Generic requirements: a single input level, same storage path, and
  // matching compression; otherwise the data must be rewritten.
  if (!(start_level_ != output_level_ && num_input_levels() == 1 &&
        input(0, 0)->fd.GetPathId() == output_path_id() &&
        InputCompressionMatchesOutput())) {
    return false;
  }
  // assert inputs_.size() == 1
  if (output_level_ + 1 < number_levels_) {
    std::unique_ptr<SstPartitioner> partitioner = CreateSstPartitioner();
    for (const auto& file : inputs_.front().files) {
      std::vector<FileMetaData*> file_grand_parents;
      input_vstorage_->GetOverlappingInputs(output_level_ + 1, &file->smallest,
                                            &file->largest,
                                            &file_grand_parents);
      // Reject the move if a future compaction of this file with its
      // grandparents would exceed the configured compaction size budget.
      const auto compaction_size =
          file->fd.GetFileSize() + TotalFileSize(file_grand_parents);
      if (compaction_size > max_compaction_bytes_) {
        return false;
      }
      if (partitioner.get() != nullptr) {
        if (!partitioner->CanDoTrivialMove(file->smallest.user_key(),
                                           file->largest.user_key())) {
          return false;
        }
      }
    }
  }
  // PerKeyPlacement compaction should never be trivial move.
  if (SupportsPerKeyPlacement()) {
    return false;
  }
  return true;
}
  577. void Compaction::AddInputDeletions(VersionEdit* out_edit) {
  578. for (size_t which = 0; which < num_input_levels(); which++) {
  579. for (size_t i = 0; i < inputs_[which].size(); i++) {
  580. out_edit->DeleteFile(level(which), inputs_[which][i]->fd.GetNumber());
  581. }
  582. }
  583. }
// Returns true iff `user_key` is guaranteed not to exist in any level
// strictly beyond the output level. `level_ptrs` holds one per-level file
// cursor; cursors only ever move forward here, so this is presumably
// intended for callers passing keys in ascending order — confirm against
// the call sites.
bool Compaction::KeyNotExistsBeyondOutputLevel(
    const Slice& user_key, std::vector<size_t>* level_ptrs) const {
  assert(input_version_ != nullptr);
  assert(level_ptrs != nullptr);
  assert(level_ptrs->size() == static_cast<size_t>(number_levels_));
  if (bottommost_level_) {
    // No data can exist below the bottommost level.
    return true;
  } else if (output_level_ != 0 &&
             cfd_->ioptions().compaction_style == kCompactionStyleLevel) {
    // TODO: apply the optimization here to other compaction styles and
    // compaction/flush to L0.
    // Maybe use binary search to find right entry instead of linear search?
    const Comparator* user_cmp = cfd_->user_comparator();
    for (int lvl = output_level_ + 1; lvl < number_levels_; lvl++) {
      const std::vector<FileMetaData*>& files =
          input_vstorage_->LevelFiles(lvl);
      // Advance this level's cursor to the first file whose largest key is
      // >= user_key; earlier files cannot contain it.
      for (; level_ptrs->at(lvl) < files.size(); level_ptrs->at(lvl)++) {
        auto* f = files[level_ptrs->at(lvl)];
        if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) {
          // We've advanced far enough
          // In the presence of user-defined timestamp, we may need to handle
          // the case in which f->smallest.user_key() (including ts) has the
          // same user key, but the ts part is smaller. If so,
          // Compare(user_key, f->smallest.user_key()) returns -1.
          // That's why we need CompareWithoutTimestamp().
          if (user_cmp->CompareWithoutTimestamp(user_key,
                                                f->smallest.user_key()) >= 0) {
            // Key falls in this file's range, so it may
            // exist beyond output level
            return false;
          }
          break;
        }
      }
    }
    return true;
  }
  return false;
}
  623. bool Compaction::KeyRangeNotExistsBeyondOutputLevel(
  624. const Slice& begin_key, const Slice& end_key,
  625. std::vector<size_t>* level_ptrs) const {
  626. assert(input_version_ != nullptr);
  627. assert(level_ptrs != nullptr);
  628. assert(level_ptrs->size() == static_cast<size_t>(number_levels_));
  629. assert(cfd_->user_comparator()->CompareWithoutTimestamp(begin_key, end_key) <
  630. 0);
  631. if (bottommost_level_) {
  632. return true /* does not overlap */;
  633. } else if (output_level_ != 0 &&
  634. cfd_->ioptions().compaction_style == kCompactionStyleLevel) {
  635. const Comparator* user_cmp = cfd_->user_comparator();
  636. for (int lvl = output_level_ + 1; lvl < number_levels_; lvl++) {
  637. const std::vector<FileMetaData*>& files =
  638. input_vstorage_->LevelFiles(lvl);
  639. for (; level_ptrs->at(lvl) < files.size(); level_ptrs->at(lvl)++) {
  640. auto* f = files[level_ptrs->at(lvl)];
  641. // Advance until the first file with begin_key <= f->largest.user_key()
  642. if (user_cmp->CompareWithoutTimestamp(begin_key,
  643. f->largest.user_key()) > 0) {
  644. continue;
  645. }
  646. // We know that the previous file prev_f, if exists, has
  647. // prev_f->largest.user_key() < begin_key.
  648. if (user_cmp->CompareWithoutTimestamp(end_key,
  649. f->smallest.user_key()) <= 0) {
  650. // not overlapping with this level
  651. break;
  652. } else {
  653. // We have:
  654. // - begin_key < end_key,
  655. // - begin_key <= f->largest.user_key(), and
  656. // - end_key > f->smallest.user_key()
  657. return false /* overlap */;
  658. }
  659. }
  660. }
  661. return true /* does not overlap */;
  662. }
  663. return false /* overlaps */;
  664. };
  665. // Mark (or clear) each file that is being compacted
  666. void Compaction::MarkFilesBeingCompacted(bool being_compacted) const {
  667. for (size_t i = 0; i < num_input_levels(); i++) {
  668. for (size_t j = 0; j < inputs_[i].size(); j++) {
  669. assert(being_compacted != inputs_[i][j]->being_compacted);
  670. inputs_[i][j]->being_compacted = being_compacted;
  671. }
  672. }
  673. }
// Sample output:
// If compacting 3 L0 files, 2 L3 files and 1 L4 file, and outputting to L5,
// print: "3@0 + 2@3 + 1@4 files to L5"
//
// Formats the per-level input file counts into the caller-supplied scratch
// buffer and returns a pointer to it. Output is silently truncated to the
// scratch buffer size.
const char* Compaction::InputLevelSummary(
    InputLevelSummaryBuffer* scratch) const {
  int len = 0;
  bool is_first = true;
  for (auto& input_level : inputs_) {
    // Levels with no input files are omitted from the summary.
    if (input_level.empty()) {
      continue;
    }
    if (!is_first) {
      len +=
          snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len, " + ");
      // snprintf returns the would-have-been length on truncation; clamp so
      // the next write offset never runs past the end of the buffer.
      len = std::min(len, static_cast<int>(sizeof(scratch->buffer)));
    } else {
      is_first = false;
    }
    // "<count>@<level>", e.g. "3@0".
    len += snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len,
                    "%" ROCKSDB_PRIszt "@%d", input_level.size(),
                    input_level.level);
    len = std::min(len, static_cast<int>(sizeof(scratch->buffer)));
  }
  snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len,
           " files to L%d", output_level());
  return scratch->buffer;
}
  701. uint64_t Compaction::CalculateTotalInputSize() const {
  702. uint64_t size = 0;
  703. for (auto& input_level : inputs_) {
  704. for (auto f : input_level.files) {
  705. size += f->fd.GetFileSize();
  706. }
  707. }
  708. return size;
  709. }
// Releases this compaction's claim on its input files: clears their
// being_compacted flags and notifies the compaction picker, passing along
// the compaction's completion status.
void Compaction::ReleaseCompactionFiles(const Status& status) {
  MarkFilesBeingCompacted(false);
  cfd_->compaction_picker()->ReleaseCompactionFiles(this, status);
}
// Resets the next-compaction index for this compaction's start level in the
// input version's storage info.
void Compaction::ResetNextCompactionIndex() {
  // Requires a live input version; the index lives in its VersionStorageInfo.
  assert(input_version_ != nullptr);
  input_vstorage_->ResetNextCompactionIndex(start_level_);
}
  718. namespace {
  719. int InputSummary(const std::vector<FileMetaData*>& files,
  720. const std::vector<bool>& files_filtered, char* output,
  721. int len) {
  722. assert(files_filtered.empty() || (files.size() == files_filtered.size()));
  723. *output = '\0';
  724. int write = 0;
  725. for (size_t i = 0; i < files.size(); i++) {
  726. int sz = len - write;
  727. int ret;
  728. char sztxt[16];
  729. AppendHumanBytes(files.at(i)->fd.GetFileSize(), sztxt, 16);
  730. if (files_filtered.empty()) {
  731. ret = snprintf(output + write, sz, "%" PRIu64 "(%s) ",
  732. files.at(i)->fd.GetNumber(), sztxt);
  733. } else {
  734. ret = snprintf(output + write, sz, "%" PRIu64 "(%s filtered:%s) ",
  735. files.at(i)->fd.GetNumber(), sztxt,
  736. files_filtered.at(i) ? "true" : "false");
  737. }
  738. if (ret < 0 || ret >= sz) {
  739. break;
  740. }
  741. write += ret;
  742. }
  743. // if files.size() is non-zero, overwrite the last space
  744. return write - !!files.size();
  745. }
  746. } // namespace
// Writes a human-readable description of this compaction into `output`
// (at most `len` bytes): the base version number, the start level, and a
// bracketed per-level list of input files formatted by InputSummary().
// Returns early (possibly with truncated output) whenever the buffer fills.
void Compaction::Summary(char* output, int len) {
  int write =
      snprintf(output, len, "Base version %" PRIu64 " Base level %d, inputs: [",
               input_version_->GetVersionNumber(), start_level_);
  if (write < 0 || write >= len) {
    return;
  }
  for (size_t level_iter = 0; level_iter < num_input_levels(); ++level_iter) {
    if (level_iter > 0) {
      // Close the previous level's bracket and open the next one.
      write += snprintf(output + write, len - write, "], [");
      if (write < 0 || write >= len) {
        return;
      }
    }
    // Filtering flags exist only for non-start levels, hence size() - 1.
    assert(non_start_level_input_files_filtered_.empty() ||
           non_start_level_input_files_filtered_.size() == inputs_.size() - 1);
    // The start level (level_iter == 0) never carries filtering info, so it
    // gets an empty flag vector.
    write += InputSummary(
        inputs_[level_iter].files,
        (level_iter == 0 || non_start_level_input_files_filtered_.empty())
            ? std::vector<bool>{}
            : non_start_level_input_files_filtered_[level_iter - 1],
        output + write, len - write);
    if (write < 0 || write >= len) {
      return;
    }
  }
  snprintf(output + write, len - write, "]");
}
  775. uint64_t Compaction::OutputFilePreallocationSize() const {
  776. uint64_t preallocation_size = 0;
  777. for (const auto& level_files : inputs_) {
  778. for (const auto& file : level_files.files) {
  779. preallocation_size += file->fd.GetFileSize();
  780. }
  781. }
  782. if (max_output_file_size_ != std::numeric_limits<uint64_t>::max() &&
  783. (immutable_options_.compaction_style == kCompactionStyleLevel ||
  784. output_level() > 0)) {
  785. preallocation_size = std::min(max_output_file_size_, preallocation_size);
  786. }
  787. // Over-estimate slightly so we don't end up just barely crossing
  788. // the threshold
  789. // No point to preallocate more than 1GB.
  790. return std::min(uint64_t{1073741824},
  791. preallocation_size + (preallocation_size / 10));
  792. }
// Creates a CompactionFilter for this compaction via the column family's
// compaction_filter_factory. Returns nullptr when no factory is configured,
// or when the factory declines to filter compaction-created table files.
std::unique_ptr<CompactionFilter> Compaction::CreateCompactionFilter() const {
  if (!cfd_->ioptions().compaction_filter_factory) {
    return nullptr;
  }
  if (!cfd_->ioptions()
           .compaction_filter_factory->ShouldFilterTableFileCreation(
               TableFileCreationReason::kCompaction)) {
    return nullptr;
  }
  // Describe this compaction so the factory can tailor the filter it builds.
  CompactionFilter::Context context;
  context.is_full_compaction = is_full_compaction_;
  context.is_manual_compaction = is_manual_compaction_;
  context.input_start_level = start_level_;
  context.column_family_id = cfd_->GetID();
  context.reason = TableFileCreationReason::kCompaction;
  context.input_table_properties = GetInputTableProperties();
  if (context.input_table_properties.empty()) {
    // Non-fatal: the filter simply won't see input table properties.
    ROCKS_LOG_WARN(
        immutable_options_.info_log,
        "Unable to set `input_table_properties` of `CompactionFilter::Context` "
        "for compaction.");
  }
  return cfd_->ioptions().compaction_filter_factory->CreateCompactionFilter(
      context);
}
// Creates an SstPartitioner for this compaction via the configured
// sst_partitioner_factory, or returns nullptr when none is configured.
std::unique_ptr<SstPartitioner> Compaction::CreateSstPartitioner() const {
  if (!immutable_options_.sst_partitioner_factory) {
    return nullptr;
  }
  // Give the factory the compaction's shape and overall key range.
  SstPartitioner::Context context;
  context.is_full_compaction = is_full_compaction_;
  context.is_manual_compaction = is_manual_compaction_;
  context.output_level = output_level_;
  context.smallest_user_key = smallest_user_key_;
  context.largest_user_key = largest_user_key_;
  return immutable_options_.sst_partitioner_factory->CreatePartitioner(context);
}
  830. bool Compaction::IsOutputLevelEmpty() const {
  831. return inputs_.back().level != output_level_ || inputs_.back().empty();
  832. }
// Decides whether this compaction is allowed to be split into parallel
// subcompactions.
bool Compaction::ShouldFormSubcompactions() const {
  if (cfd_ == nullptr) {
    return false;
  }
  // Plain table output does not get subcompactions.
  // NOTE(review): this compares the factory's Name() against
  // TableFactory::kPlainTableName() with `==` -- if both are C-string
  // pointers this relies on them being the same pointer; confirm.
  if (mutable_cf_options_.table_factory->Name() ==
      TableFactory::kPlainTableName()) {
    return false;
  }
  // Round-Robin pri under leveled compaction allows subcompactions by default
  // and the number of subcompactions can be larger than max_subcompactions_
  if (cfd_->ioptions().compaction_pri == kRoundRobin &&
      cfd_->ioptions().compaction_style == kCompactionStyleLevel) {
    return output_level_ > 0;
  }
  // Otherwise subcompactions require an explicit max_subcompactions_ > 1.
  if (max_subcompactions_ <= 1) {
    return false;
  }
  if (cfd_->ioptions().compaction_style == kCompactionStyleLevel) {
    // Leveled: only L0-origin or manual compactions into a non-L0 level.
    return (start_level_ == 0 || is_manual_compaction_) && output_level_ > 0;
  } else if (cfd_->ioptions().compaction_style == kCompactionStyleUniversal) {
    return number_levels_ > 1 && output_level_ > 0;
  } else {
    // Other compaction styles (e.g. FIFO) never form subcompactions.
    return false;
  }
}
  858. bool Compaction::DoesInputReferenceBlobFiles() const {
  859. assert(input_version_);
  860. const VersionStorageInfo* storage_info = input_version_->storage_info();
  861. assert(storage_info);
  862. if (storage_info->GetBlobFiles().empty()) {
  863. return false;
  864. }
  865. for (size_t i = 0; i < inputs_.size(); ++i) {
  866. for (const FileMetaData* meta : inputs_[i].files) {
  867. assert(meta);
  868. if (meta->oldest_blob_file_number != kInvalidBlobFileNumber) {
  869. return true;
  870. }
  871. }
  872. }
  873. return false;
  874. }
  875. uint64_t Compaction::MaxInputFileNewestKeyTime(const InternalKey* start,
  876. const InternalKey* end) const {
  877. uint64_t newest_key_time = kUnknownNewestKeyTime;
  878. const InternalKeyComparator& icmp =
  879. column_family_data()->internal_comparator();
  880. for (const auto& level_files : inputs_) {
  881. for (const auto& file : level_files.files) {
  882. if (start != nullptr && icmp.Compare(file->largest, *start) < 0) {
  883. continue;
  884. }
  885. if (end != nullptr && icmp.Compare(file->smallest, *end) > 0) {
  886. continue;
  887. }
  888. newest_key_time = std::max(newest_key_time, file->TryGetNewestKeyTime());
  889. }
  890. }
  891. return newest_key_time;
  892. }
  893. uint64_t Compaction::MinInputFileOldestAncesterTime(
  894. const InternalKey* start, const InternalKey* end) const {
  895. uint64_t min_oldest_ancester_time = std::numeric_limits<uint64_t>::max();
  896. const InternalKeyComparator& icmp =
  897. column_family_data()->internal_comparator();
  898. for (const auto& level_files : inputs_) {
  899. for (const auto& file : level_files.files) {
  900. if (start != nullptr && icmp.Compare(file->largest, *start) < 0) {
  901. continue;
  902. }
  903. if (end != nullptr && icmp.Compare(file->smallest, *end) > 0) {
  904. continue;
  905. }
  906. uint64_t oldest_ancester_time = file->TryGetOldestAncesterTime();
  907. if (oldest_ancester_time != 0) {
  908. min_oldest_ancester_time =
  909. std::min(min_oldest_ancester_time, oldest_ancester_time);
  910. }
  911. }
  912. }
  913. return min_oldest_ancester_time;
  914. }
  915. uint64_t Compaction::MinInputFileEpochNumber() const {
  916. uint64_t min_epoch_number = std::numeric_limits<uint64_t>::max();
  917. for (const auto& inputs_per_level : inputs_) {
  918. for (const auto& file : inputs_per_level.files) {
  919. min_epoch_number = std::min(min_epoch_number, file->epoch_number);
  920. }
  921. }
  922. return min_epoch_number;
  923. }
// Determines the proximal level (output_level - 1) to use for per-key
// placement, or kInvalidLevel when per-key placement can't apply to this
// compaction's shape/options.
int Compaction::EvaluateProximalLevel(
    const VersionStorageInfo* vstorage,
    const MutableCFOptions& mutable_cf_options,
    const ImmutableOptions& immutable_options, const int start_level,
    const int output_level) {
  // TODO: currently per_key_placement feature only support level and universal
  // compaction
  if (immutable_options.compaction_style != kCompactionStyleLevel &&
      immutable_options.compaction_style != kCompactionStyleUniversal) {
    return kInvalidLevel;
  }
  // Only compactions that output to the last level have a proximal level.
  if (output_level != immutable_options.num_levels - 1) {
    return kInvalidLevel;
  }
  int proximal_level = output_level - 1;
  assert(proximal_level < immutable_options.num_levels);
  // L0 (or below) can't serve as the proximal output level.
  if (proximal_level <= 0) {
    return kInvalidLevel;
  }
  // If the proximal level is not within input level -> output level range
  // check if the proximal output level is empty, if it's empty, it could
  // also be locked for the proximal output.
  // TODO: ideally, it only needs to check if there's a file within the
  // compaction output key range. For simplicity, it just check if there's any
  // file on the proximal level.
  if (start_level == immutable_options.num_levels - 1 &&
      (immutable_options.compaction_style != kCompactionStyleUniversal ||
       !vstorage->LevelFiles(proximal_level).empty())) {
    return kInvalidLevel;
  }
  // Per-key placement is gated on preclude_last_level_data_seconds.
  bool supports_per_key_placement =
      mutable_cf_options.preclude_last_level_data_seconds > 0;
  // it could be overridden by unittest
  TEST_SYNC_POINT_CALLBACK("Compaction::SupportsPerKeyPlacement:Enabled",
                           &supports_per_key_placement);
  if (!supports_per_key_placement) {
    return kInvalidLevel;
  }
  return proximal_level;
}
// If the start-level input contains a standalone range-tombstone file whose
// deletions are definitely visible to the earliest snapshot, exclude
// non-start-level input files that are entirely covered by (and strictly
// older than) that tombstone from the compaction iterator's inputs. Filtered
// files are recorded in filtered_input_levels_ and flagged in
// non_start_level_input_files_filtered_; the remaining files are used to
// build input_levels_. Otherwise all inputs are used unchanged.
void Compaction::FilterInputsForCompactionIterator() {
  assert(earliest_snapshot_.has_value());
  // cfd_ is not populated at Compaction construction time, get it from
  // VersionStorageInfo instead.
  assert(input_vstorage_);
  const auto* ucmp = input_vstorage_->user_comparator();
  assert(ucmp);
  // Simply comparing file boundaries when user-defined timestamp is defined
  // is not as safe because we need to also compare timestamp to know for
  // sure. Although entries with higher timestamp is also supposed to have
  // higher sequence number for the same user key (without timestamp).
  assert(ucmp->timestamp_size() == 0);
  size_t num_input_levels = inputs_.size();
  // TODO(yuzhangyu): filtering of older L0 file by new L0 file is not
  // supported yet.
  // For an L0 start level, the newest file is the last one; otherwise files
  // are key-ordered and the candidate is the first one.
  FileMetaData* rangedel_candidate = inputs_[0].level == 0
                                         ? inputs_[0].files.back()
                                         : inputs_[0].files.front();
  assert(rangedel_candidate);
  if (!rangedel_candidate->FileIsStandAloneRangeTombstone() ||
      !DataIsDefinitelyInSnapshot(rangedel_candidate->fd.smallest_seqno,
                                  earliest_snapshot_.value(),
                                  snapshot_checker_)) {
    // No usable standalone tombstone: build briefs from all inputs as-is.
    for (size_t level = 0; level < num_input_levels; level++) {
      DoGenerateLevelFilesBrief(&input_levels_[level], inputs_[level].files,
                                &arena_);
    }
    return;
  }
  Slice rangedel_start_ukey = rangedel_candidate->smallest.user_key();
  Slice rangedel_end_ukey = rangedel_candidate->largest.user_key();
  SequenceNumber rangedel_seqno = rangedel_candidate->fd.smallest_seqno;
  // Partition each non-start level's files into kept vs filtered.
  std::vector<std::vector<FileMetaData*>> non_start_level_input_files;
  non_start_level_input_files.reserve(num_input_levels - 1);
  non_start_level_input_files_filtered_.reserve(num_input_levels - 1);
  for (size_t level = 1; level < num_input_levels; level++) {
    non_start_level_input_files.emplace_back();
    non_start_level_input_files_filtered_.emplace_back();
    for (FileMetaData* file : inputs_[level].files) {
      non_start_level_input_files_filtered_.back().push_back(false);
      // When range data and point data has the same sequence number, point
      // data wins. Range deletion end key is exclusive, so check it's bigger
      // than file right boundary user key.
      if (rangedel_seqno > file->fd.largest_seqno &&
          ucmp->CompareWithoutTimestamp(rangedel_start_ukey,
                                        file->smallest.user_key()) <= 0 &&
          ucmp->CompareWithoutTimestamp(rangedel_end_ukey,
                                        file->largest.user_key()) > 0) {
        // Entirely covered by a strictly newer tombstone: filter it out.
        non_start_level_input_files_filtered_.back().back() = true;
        filtered_input_levels_[level].push_back(file);
      } else {
        non_start_level_input_files.back().push_back(file);
      }
    }
  }
  // Start level is never filtered; non-start levels use the kept subsets.
  DoGenerateLevelFilesBrief(&input_levels_[0], inputs_[0].files, &arena_);
  assert(non_start_level_input_files.size() == num_input_levels - 1);
  for (size_t level = 1; level < num_input_levels; level++) {
    DoGenerateLevelFilesBrief(&input_levels_[level],
                              non_start_level_input_files[level - 1], &arena_);
  }
}
  1026. Temperature Compaction::GetOutputTemperature(bool is_proximal_level) const {
  1027. if (output_temperature_override_ != Temperature::kUnknown) {
  1028. return output_temperature_override_;
  1029. }
  1030. if (is_last_level() && !is_proximal_level &&
  1031. mutable_cf_options_.last_level_temperature != Temperature::kUnknown) {
  1032. return mutable_cf_options_.last_level_temperature;
  1033. }
  1034. return mutable_cf_options_.default_write_temperature;
  1035. }
  1036. } // namespace ROCKSDB_NAMESPACE