compaction_picker.cc

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/compaction/compaction_picker.h"

#include <cinttypes>
#include <limits>
#include <queue>
#include <string>
#include <utility>
#include <vector>

#include "db/column_family.h"
#include "file/filename.h"
#include "logging/log_buffer.h"
#include "logging/logging.h"
#include "monitoring/statistics_impl.h"
#include "test_util/sync_point.h"
#include "util/random.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

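// Finds a run of level-0 files, starting at index 0 of `level_files`, that is
// worth compacting within L0: files are pulled in while the average compaction
// bytes per (to-be-deleted) file keeps shrinking and the running total stays
// under `max_compaction_bytes`. Returns true and fills `*comp_inputs` if at
// least `min_files_to_compact` files qualify and the per-file cost stays below
// `max_compact_bytes_per_del_file`.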
bool FindIntraL0Compaction(const std::vector<FileMetaData*>& level_files,
                           size_t min_files_to_compact,
                           uint64_t max_compact_bytes_per_del_file,
                           uint64_t max_compaction_bytes,
                           CompactionInputFiles* comp_inputs) {
  TEST_SYNC_POINT("FindIntraL0Compaction");

  size_t start = 0;
  if (level_files.size() == 0 || level_files[start]->being_compacted) {
    return false;
  }

  size_t compact_bytes = static_cast<size_t>(level_files[start]->fd.file_size);
  size_t compact_bytes_per_del_file = std::numeric_limits<size_t>::max();
  // Compaction range will be [start, limit).
  size_t limit;
  // Pull in files until the amount of compaction work per deleted file begins
  // increasing or maximum total compaction size is reached.
  size_t new_compact_bytes_per_del_file = 0;
  for (limit = start + 1; limit < level_files.size(); ++limit) {
    compact_bytes += static_cast<size_t>(level_files[limit]->fd.file_size);
    new_compact_bytes_per_del_file = compact_bytes / (limit - start);
    if (level_files[limit]->being_compacted ||
        new_compact_bytes_per_del_file > compact_bytes_per_del_file ||
        compact_bytes > max_compaction_bytes) {
      break;
    }
    compact_bytes_per_del_file = new_compact_bytes_per_del_file;
  }

  if ((limit - start) >= min_files_to_compact &&
      compact_bytes_per_del_file < max_compact_bytes_per_del_file) {
    assert(comp_inputs != nullptr);
    comp_inputs->level = 0;
    for (size_t i = start; i < limit; ++i) {
      comp_inputs->files.push_back(level_files[i]);
    }
    return true;
  }
  return false;
}

// Determine compression type, based on user options, level of the output
// file and whether compression is disabled.
// If enable_compression is false, then compression is always disabled no
// matter what the values of the other two parameters are.
// Otherwise, the compression type is determined based on options and level.
CompressionType GetCompressionType(const VersionStorageInfo* vstorage,
                                   const MutableCFOptions& mutable_cf_options,
                                   int level, int base_level,
                                   const bool enable_compression) {
  if (!enable_compression) {
    // disable compression
    return kNoCompression;
  }

  // If bottommost_compression is set and we are compacting to the
  // bottommost level then we should use it.
  if (mutable_cf_options.bottommost_compression != kDisableCompressionOption &&
      level >= (vstorage->num_non_empty_levels() - 1)) {
    return mutable_cf_options.bottommost_compression;
  }
  // If the user has specified a different compression level for each level,
  // then pick the compression for that level.
  if (!mutable_cf_options.compression_per_level.empty()) {
    assert(level == 0 || level >= base_level);
    int idx = (level == 0) ? 0 : level - base_level + 1;
    const int n =
        static_cast<int>(mutable_cf_options.compression_per_level.size()) - 1;
    // It is possible for level_ to be -1; in that case, we use level
    // 0's compression. This occurs mostly in backwards compatibility
    // situations when the builder doesn't know what level the file
    // belongs to. Likewise, if level is beyond the end of the
    // specified compression levels, use the last value.
    return mutable_cf_options
        .compression_per_level[std::max(0, std::min(idx, n))];
  } else {
    return mutable_cf_options.compression;
  }
}

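// Pick the CompressionOptions for an output level: use the dedicated
// bottommost_compression_opts when compacting to the bottommost level and
// those options are enabled, otherwise fall back to the column family's
// default compression_opts.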
CompressionOptions GetCompressionOptions(const MutableCFOptions& cf_options,
                                         const VersionStorageInfo* vstorage,
                                         int level,
                                         const bool enable_compression) {
  if (!enable_compression) {
    return cf_options.compression_opts;
  }

  // If bottommost_compression_opts is enabled and we are compacting to the
  // bottommost level then we should use the specified compression options.
  if (level >= (vstorage->num_non_empty_levels() - 1) &&
      cf_options.bottommost_compression_opts.enabled) {
    return cf_options.bottommost_compression_opts;
  }
  return cf_options.compression_opts;
}

CompactionPicker::CompactionPicker(const ImmutableOptions& ioptions,
                                   const InternalKeyComparator* icmp)
    : ioptions_(ioptions), icmp_(icmp) {}

CompactionPicker::~CompactionPicker() = default;

// Delete this compaction from the list of running compactions.
void CompactionPicker::ReleaseCompactionFiles(Compaction* c,
                                              const Status& status) {
  UnregisterCompaction(c);
  if (!status.ok()) {
    c->ResetNextCompactionIndex();
  }
}

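// Compute the smallest and largest internal keys spanned by one set of input
// files. Level-0 files may overlap each other, so every file is inspected;
// on other levels the files are sorted and disjoint, so the first and last
// files bound the range.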
void CompactionPicker::GetRange(const CompactionInputFiles& inputs,
                                InternalKey* smallest,
                                InternalKey* largest) const {
  const int level = inputs.level;
  assert(!inputs.empty());
  smallest->Clear();
  largest->Clear();

  if (level == 0) {
    for (size_t i = 0; i < inputs.size(); i++) {
      FileMetaData* f = inputs[i];
      if (i == 0) {
        *smallest = f->smallest;
        *largest = f->largest;
      } else {
        if (icmp_->Compare(f->smallest, *smallest) < 0) {
          *smallest = f->smallest;
        }
        if (icmp_->Compare(f->largest, *largest) > 0) {
          *largest = f->largest;
        }
      }
    }
  } else {
    *smallest = inputs[0]->smallest;
    *largest = inputs[inputs.size() - 1]->largest;
  }
}

void CompactionPicker::GetRange(const CompactionInputFiles& inputs1,
                                const CompactionInputFiles& inputs2,
                                InternalKey* smallest,
                                InternalKey* largest) const {
  assert(!inputs1.empty() || !inputs2.empty());
  if (inputs1.empty()) {
    GetRange(inputs2, smallest, largest);
  } else if (inputs2.empty()) {
    GetRange(inputs1, smallest, largest);
  } else {
    InternalKey smallest1, smallest2, largest1, largest2;
    GetRange(inputs1, &smallest1, &largest1);
    GetRange(inputs2, &smallest2, &largest2);
    *smallest =
        icmp_->Compare(smallest1, smallest2) < 0 ? smallest1 : smallest2;
    *largest = icmp_->Compare(largest1, largest2) < 0 ? largest2 : largest1;
  }
}

void CompactionPicker::GetRange(const std::vector<CompactionInputFiles>& inputs,
                                InternalKey* smallest, InternalKey* largest,
                                int exclude_level) const {
  InternalKey current_smallest;
  InternalKey current_largest;
  bool initialized = false;
  for (const auto& in : inputs) {
    if (in.empty() || in.level == exclude_level) {
      continue;
    }
    GetRange(in, &current_smallest, &current_largest);
    if (!initialized) {
      *smallest = current_smallest;
      *largest = current_largest;
      initialized = true;
    } else {
      if (icmp_->Compare(current_smallest, *smallest) < 0) {
        *smallest = current_smallest;
      }
      if (icmp_->Compare(current_largest, *largest) > 0) {
        *largest = current_largest;
      }
    }
  }
  assert(initialized);
}

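// Expand `inputs` within its own level until there is a "clean cut" between
// the chosen files and their neighbors, i.e. no user key straddles the
// boundary. Returns false if the expanded set includes a file that is already
// being compacted.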
bool CompactionPicker::ExpandInputsToCleanCut(const std::string& /*cf_name*/,
                                              VersionStorageInfo* vstorage,
                                              CompactionInputFiles* inputs,
                                              InternalKey** next_smallest) {
  // An empty input set would not be a valid compaction.
  assert(!inputs->empty());

  const int level = inputs->level;
  // GetOverlappingInputs will always do the right thing for level-0.
  // So we don't need to do any expansion if level == 0.
  if (level == 0) {
    return true;
  }

  InternalKey smallest, largest;

  // Keep expanding inputs until we are sure that there is a "clean cut"
  // boundary between the files in input and the surrounding files.
  // This will ensure that no parts of a key are lost during compaction.
  int hint_index = -1;
  size_t old_size;
  do {
    old_size = inputs->size();
    GetRange(*inputs, &smallest, &largest);
    inputs->clear();
    vstorage->GetOverlappingInputs(level, &smallest, &largest, &inputs->files,
                                   hint_index, &hint_index, true, nullptr,
                                   next_smallest);
  } while (inputs->size() > old_size);

  // we started off with inputs non-empty and the previous loop only grew
  // inputs. thus, inputs should be non-empty here
  assert(!inputs->empty());

  // If, after the expansion, there are files that are already under
  // compaction, then we must drop/cancel this compaction.
  if (AreFilesInCompaction(inputs->files)) {
    return false;
  }
  return true;
}

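// Returns true if the user-key range [smallest_user_key, largest_user_key]
// overlaps the output key range of any running compaction targeting `level`,
// or the proximal-level output range of a running compaction that supports
// per-key placement.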
bool CompactionPicker::RangeOverlapWithCompaction(
    const Slice& smallest_user_key, const Slice& largest_user_key,
    int level) const {
  const Comparator* ucmp = icmp_->user_comparator();
  for (Compaction* c : compactions_in_progress_) {
    if (c->output_level() == level &&
        ucmp->CompareWithoutTimestamp(smallest_user_key,
                                      c->GetLargestUserKey()) <= 0 &&
        ucmp->CompareWithoutTimestamp(largest_user_key,
                                      c->GetSmallestUserKey()) >= 0) {
      // Overlap
      return true;
    }
    if (c->SupportsPerKeyPlacement()) {
      if (c->OverlapProximalLevelOutputRange(smallest_user_key,
                                             largest_user_key)) {
        return true;
      }
    }
  }
  // Did not overlap with any running compaction in level `level`
  return false;
}

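// Returns true if the combined key range of `inputs` overlaps a running
// compaction that outputs to `level` or, when `proximal_level` is valid, to
// the proximal level.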
bool CompactionPicker::FilesRangeOverlapWithCompaction(
    const std::vector<CompactionInputFiles>& inputs, int level,
    int proximal_level) const {
  bool is_empty = true;
  for (auto& in : inputs) {
    if (!in.empty()) {
      is_empty = false;
      break;
    }
  }
  if (is_empty) {
    // No files in inputs
    return false;
  }

  // TODO: Intra L0 compactions can have the ranges overlapped, but the input
  // files cannot be overlapped in the order of L0 files.
  InternalKey smallest, largest;
  GetRange(inputs, &smallest, &largest, Compaction::kInvalidLevel);
  if (proximal_level != Compaction::kInvalidLevel) {
    if (ioptions_.compaction_style == kCompactionStyleUniversal) {
      if (RangeOverlapWithCompaction(smallest.user_key(), largest.user_key(),
                                     proximal_level)) {
        return true;
      }
    } else {
      InternalKey proximal_smallest, proximal_largest;
      GetRange(inputs, &proximal_smallest, &proximal_largest, level);
      if (RangeOverlapWithCompaction(proximal_smallest.user_key(),
                                     proximal_largest.user_key(),
                                     proximal_level)) {
        return true;
      }
    }
  }
  return RangeOverlapWithCompaction(smallest.user_key(), largest.user_key(),
                                    level);
}

// Returns true if any of the specified files is being compacted
bool CompactionPicker::AreFilesInCompaction(
    const std::vector<FileMetaData*>& files) {
  for (size_t i = 0; i < files.size(); i++) {
    if (files[i]->being_compacted) {
      return true;
    }
  }
  return false;
}

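// Build and register a Compaction for CompactFiles(): the caller supplies the
// exact input files per level, so no input expansion happens here. The
// compression type comes from `compact_options` unless it is
// kDisableCompressionOption, in which case it is derived from the output
// level.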
Compaction* CompactionPicker::PickCompactionForCompactFiles(
    const CompactionOptions& compact_options,
    const std::vector<CompactionInputFiles>& input_files, int output_level,
    VersionStorageInfo* vstorage, const MutableCFOptions& mutable_cf_options,
    const MutableDBOptions& mutable_db_options, uint32_t output_path_id,
    std::optional<SequenceNumber> earliest_snapshot,
    const SnapshotChecker* snapshot_checker) {
#ifndef NDEBUG
  assert(input_files.size());
  // This compaction output should not overlap with a running compaction as
  // `SanitizeAndConvertCompactionInputFiles` should've checked earlier and db
  // mutex shouldn't have been released since.
  int start_level = Compaction::kInvalidLevel;
  for (const auto& in : input_files) {
    // input_files should already be sorted by level
    if (!in.empty()) {
      start_level = in.level;
      break;
    }
  }
  assert(output_level == 0 || !FilesRangeOverlapWithCompaction(
                                  input_files, output_level,
                                  Compaction::EvaluateProximalLevel(
                                      vstorage, mutable_cf_options, ioptions_,
                                      start_level, output_level)));
#endif /* !NDEBUG */

  CompressionType compression_type;
  if (compact_options.compression == kDisableCompressionOption) {
    int base_level;
    if (ioptions_.compaction_style == kCompactionStyleLevel) {
      base_level = vstorage->base_level();
    } else {
      base_level = 1;
    }
    compression_type = GetCompressionType(vstorage, mutable_cf_options,
                                          output_level, base_level);
  } else {
    // TODO(ajkr): `CompactionOptions` offers configurable `CompressionType`
    // without configurable `CompressionOptions`, which is inconsistent.
    compression_type = compact_options.compression;
  }
  auto c = new Compaction(
      vstorage, ioptions_, mutable_cf_options, mutable_db_options, input_files,
      output_level, compact_options.output_file_size_limit,
      mutable_cf_options.max_compaction_bytes, output_path_id, compression_type,
      GetCompressionOptions(mutable_cf_options, vstorage, output_level),
      compact_options.output_temperature_override,
      compact_options.max_subcompactions,
      /* grandparents */ {}, earliest_snapshot, snapshot_checker,
      CompactionReason::kManualCompaction);
  RegisterCompaction(c);
  return c;
}

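// Translate a set of file numbers into per-level CompactionInputFiles by
// scanning every level of the version. Returns InvalidArgument if the set is
// empty or references a file number that does not exist in the version.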
Status CompactionPicker::GetCompactionInputsFromFileNumbers(
    std::vector<CompactionInputFiles>* input_files,
    std::unordered_set<uint64_t>* input_set, const VersionStorageInfo* vstorage,
    const CompactionOptions& /*compact_options*/) const {
  if (input_set->size() == 0U) {
    return Status::InvalidArgument(
        "Compaction must include at least one file.");
  }
  assert(input_files);

  std::vector<CompactionInputFiles> matched_input_files;
  matched_input_files.resize(vstorage->num_levels());
  int first_non_empty_level = -1;
  int last_non_empty_level = -1;
  // TODO(yhchiang): use a lazy-initialized mapping from
  //                 file_number to FileMetaData in Version.
  for (int level = 0; level < vstorage->num_levels(); ++level) {
    for (auto file : vstorage->LevelFiles(level)) {
      auto iter = input_set->find(file->fd.GetNumber());
      if (iter != input_set->end()) {
        matched_input_files[level].files.push_back(file);
        input_set->erase(iter);
        last_non_empty_level = level;
        if (first_non_empty_level == -1) {
          first_non_empty_level = level;
        }
      }
    }
  }

  if (!input_set->empty()) {
    std::string message(
        "Cannot find matched SST files for the following file numbers:");
    for (auto fn : *input_set) {
      message += " ";
      message += std::to_string(fn);
    }
    return Status::InvalidArgument(message);
  }

  for (int level = first_non_empty_level; level <= last_non_empty_level;
       ++level) {
    matched_input_files[level].level = level;
    input_files->emplace_back(std::move(matched_input_files[level]));
  }

  return Status::OK();
}

// Returns true if any of the parent files is being compacted
bool CompactionPicker::IsRangeInCompaction(VersionStorageInfo* vstorage,
                                           const InternalKey* smallest,
                                           const InternalKey* largest,
                                           int level, int* level_index) {
  std::vector<FileMetaData*> inputs;
  assert(level < NumberLevels());

  vstorage->GetOverlappingInputs(level, smallest, largest, &inputs,
                                 level_index ? *level_index : 0, level_index);
  return AreFilesInCompaction(inputs);
}

// Populates the set of inputs of all other levels that overlap with the
// start level.
// Now we assume all levels except start level and output level are empty.
// Will also attempt to expand "start level" if that doesn't expand
// "output level" or cause "level" to include a file for compaction that has an
// overlapping user-key with another file.
// REQUIRES: input_level and output_level are different
// REQUIRES: inputs->empty() == false
// Returns false if files on parent level are currently in compaction, which
// means that we can't compact them
bool CompactionPicker::SetupOtherInputs(
    const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
    VersionStorageInfo* vstorage, CompactionInputFiles* inputs,
    CompactionInputFiles* output_level_inputs, int* parent_index,
    int base_index, bool only_expand_towards_right,
    const FileMetaData* starting_l0_file) {
  assert(!inputs->empty());
  assert(output_level_inputs->empty());
  const int input_level = inputs->level;
  const int output_level = output_level_inputs->level;
  if (input_level == output_level) {
    // no possibility of conflict
    return true;
  }

  // For now, we only support merging two levels, start level and output level.
  // We need to assert other levels are empty.
  for (int l = input_level + 1; l < output_level; l++) {
    assert(vstorage->NumLevelFiles(l) == 0);
  }

  InternalKey smallest, largest;

  // Get the range one last time.
  GetRange(*inputs, &smallest, &largest);

  // Populate the set of next-level files (output_level_inputs) to include in
  // compaction
  vstorage->GetOverlappingInputs(output_level, &smallest, &largest,
                                 &output_level_inputs->files, *parent_index,
                                 parent_index);
  if (AreFilesInCompaction(output_level_inputs->files)) {
    return false;
  }
  if (!output_level_inputs->empty()) {
    if (!ExpandInputsToCleanCut(cf_name, vstorage, output_level_inputs)) {
      return false;
    }
  }

  // See if we can further grow the number of inputs in "level" without
  // changing the number of "level+1" files we pick up. We also choose NOT
  // to expand if this would cause "level" to include some entries for some
  // user key, while excluding other entries for the same user key. This
  // can happen when one user key spans multiple files.
  if (!output_level_inputs->empty()) {
    const uint64_t output_level_inputs_size =
        TotalFileSize(output_level_inputs->files);
    const uint64_t inputs_size = TotalFileSize(inputs->files);
    bool expand_inputs = false;

    CompactionInputFiles expanded_inputs;
    expanded_inputs.level = input_level;
    // Get closed interval of output level
    InternalKey all_start, all_limit;
    GetRange(*inputs, *output_level_inputs, &all_start, &all_limit);
    bool try_overlapping_inputs = true;
    if (only_expand_towards_right) {
      // Round-robin compaction only allows expansion towards the larger side.
      vstorage->GetOverlappingInputs(input_level, &smallest, &all_limit,
                                     &expanded_inputs.files, base_index,
                                     nullptr, true, starting_l0_file);
    } else {
      vstorage->GetOverlappingInputs(input_level, &all_start, &all_limit,
                                     &expanded_inputs.files, base_index,
                                     nullptr, true, starting_l0_file);
    }
    uint64_t expanded_inputs_size = TotalFileSize(expanded_inputs.files);
    if (!ExpandInputsToCleanCut(cf_name, vstorage, &expanded_inputs)) {
      try_overlapping_inputs = false;
    }
    // It helps to reduce write amp and avoid a further separate compaction
    // to include more input level files without expanding output level files.
    // So we apply a softer limit. We still need a limit to avoid overly large
    // compactions and potential high space amp spikes.
    const uint64_t limit =
        MultiplyCheckOverflow(mutable_cf_options.max_compaction_bytes, 2.0);
    if (try_overlapping_inputs && expanded_inputs.size() > inputs->size() &&
        !AreFilesInCompaction(expanded_inputs.files) &&
        output_level_inputs_size + expanded_inputs_size < limit) {
      InternalKey new_start, new_limit;
      GetRange(expanded_inputs, &new_start, &new_limit);
      CompactionInputFiles expanded_output_level_inputs;
      expanded_output_level_inputs.level = output_level;
      vstorage->GetOverlappingInputs(output_level, &new_start, &new_limit,
                                     &expanded_output_level_inputs.files,
                                     *parent_index, parent_index);
      assert(!expanded_output_level_inputs.empty());
      if (!AreFilesInCompaction(expanded_output_level_inputs.files) &&
          ExpandInputsToCleanCut(cf_name, vstorage,
                                 &expanded_output_level_inputs) &&
          expanded_output_level_inputs.size() == output_level_inputs->size()) {
        expand_inputs = true;
      }
    }
    if (!expand_inputs) {
      vstorage->GetCleanInputsWithinInterval(input_level, &all_start,
                                             &all_limit, &expanded_inputs.files,
                                             base_index, nullptr);
      expanded_inputs_size = TotalFileSize(expanded_inputs.files);
      if (expanded_inputs.size() > inputs->size() &&
          !AreFilesInCompaction(expanded_inputs.files) &&
          (output_level_inputs_size + expanded_inputs_size) < limit) {
        expand_inputs = true;
      }
    }
    if (expand_inputs) {
      ROCKS_LOG_INFO(ioptions_.logger,
                     "[%s] Expanding@%d %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt
                     "(%" PRIu64 "+%" PRIu64 " bytes) to %" ROCKSDB_PRIszt
                     "+%" ROCKSDB_PRIszt " (%" PRIu64 "+%" PRIu64 " bytes)\n",
                     cf_name.c_str(), input_level, inputs->size(),
                     output_level_inputs->size(), inputs_size,
                     output_level_inputs_size, expanded_inputs.size(),
                     output_level_inputs->size(), expanded_inputs_size,
                     output_level_inputs_size);
      inputs->files = expanded_inputs.files;
    }
  } else {
    // Likely to be trivial move. Expand files if they are still trivial moves,
    // but limit to mutable_cf_options.max_compaction_bytes or 8 files so that
    // we don't create too much compaction pressure for the next level.
  }
  return true;
}

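// Collect the "grandparent" files: the files in the first non-empty level
// below the output level that overlap the key range of this compaction. The
// compaction later uses them to decide where to cut its output files.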
void CompactionPicker::GetGrandparents(
    VersionStorageInfo* vstorage, const CompactionInputFiles& inputs,
    const CompactionInputFiles& output_level_inputs,
    std::vector<FileMetaData*>* grandparents) {
  InternalKey start, limit;
  GetRange(inputs, output_level_inputs, &start, &limit);
  // Compute the set of grandparent files that overlap this compaction
  // (parent == level+1; grandparent == level+2 or the first
  // level after that has overlapping files)
  for (int level = output_level_inputs.level + 1; level < NumberLevels();
       level++) {
    vstorage->GetOverlappingInputs(level, &start, &limit, grandparents);
    if (!grandparents->empty()) {
      break;
    }
  }
}

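// Pick the inputs for a manual (CompactRange) compaction of [begin, end] from
// input_level into output_level. The range may be shrunk to respect
// max_compaction_bytes, in which case *compaction_end tells the caller where
// to resume; *manual_conflict is set instead of returning a compaction when
// the chosen files clash with a running compaction.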
Compaction* CompactionPicker::PickCompactionForCompactRange(
    const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
    const MutableDBOptions& mutable_db_options, VersionStorageInfo* vstorage,
    int input_level, int output_level,
    const CompactRangeOptions& compact_range_options, const InternalKey* begin,
    const InternalKey* end, InternalKey** compaction_end, bool* manual_conflict,
    uint64_t max_file_num_to_ignore, const std::string& trim_ts) {
  // CompactionPickerFIFO has its own implementation of compact range
  assert(ioptions_.compaction_style != kCompactionStyleFIFO);

  if (input_level == ColumnFamilyData::kCompactAllLevels) {
    assert(ioptions_.compaction_style == kCompactionStyleUniversal);

    // Universal compaction with more than one level always compacts all the
    // files together to the last level.
    assert(vstorage->num_levels() > 1);
    int max_output_level = vstorage->MaxOutputLevel(
        ioptions_.cf_allow_ingest_behind || ioptions_.allow_ingest_behind);
    // DBImpl::CompactRange() sets the output level to be the last level
    assert(output_level == max_output_level);
    // DBImpl::RunManualCompaction uses the full key range for universal
    // compaction
    assert(begin == nullptr);
    assert(end == nullptr);
    *compaction_end = nullptr;

    int start_level = 0;
    for (; start_level <= max_output_level &&
           vstorage->NumLevelFiles(start_level) == 0;
         start_level++) {
    }
    if (start_level > max_output_level) {
      return nullptr;
    }

    if ((start_level == 0) && (!level0_compactions_in_progress_.empty())) {
      *manual_conflict = true;
      // Only one level 0 compaction allowed
      return nullptr;
    }

    std::vector<CompactionInputFiles> inputs(max_output_level + 1 -
                                             start_level);
    for (int level = start_level; level <= max_output_level; level++) {
      inputs[level - start_level].level = level;
      auto& files = inputs[level - start_level].files;
      for (FileMetaData* f : vstorage->LevelFiles(level)) {
        files.push_back(f);
      }
      if (AreFilesInCompaction(files)) {
        *manual_conflict = true;
        return nullptr;
      }
    }

    // 2 non-exclusive manual compactions could run at the same time producing
    // overlapping outputs in the same level.
    if (FilesRangeOverlapWithCompaction(
            inputs, output_level,
            Compaction::EvaluateProximalLevel(vstorage, mutable_cf_options,
                                              ioptions_, start_level,
                                              output_level))) {
      // This compaction output could potentially conflict with the output
      // of a currently running compaction, we cannot run it.
      *manual_conflict = true;
      return nullptr;
    }

    Compaction* c = new Compaction(
        vstorage, ioptions_, mutable_cf_options, mutable_db_options,
        std::move(inputs), output_level,
        MaxFileSizeForLevel(mutable_cf_options, output_level,
                            ioptions_.compaction_style),
        /* max_compaction_bytes */ LLONG_MAX,
        compact_range_options.target_path_id,
        GetCompressionType(vstorage, mutable_cf_options, output_level, 1),
        GetCompressionOptions(mutable_cf_options, vstorage, output_level),
        Temperature::kUnknown, compact_range_options.max_subcompactions,
        /* grandparents */ {}, /* earliest_snapshot */ std::nullopt,
        /* snapshot_checker */ nullptr, CompactionReason::kManualCompaction,
        trim_ts, /* score */ -1,
        /* l0_files_might_overlap */ true,
        compact_range_options.blob_garbage_collection_policy,
        compact_range_options.blob_garbage_collection_age_cutoff);

    RegisterCompaction(c);
    vstorage->ComputeCompactionScore(ioptions_, mutable_cf_options);
    return c;
  }

  CompactionInputFiles inputs;
  inputs.level = input_level;
  bool covering_the_whole_range = true;

  // All files are 'overlapping' in universal style compaction.
  // We have to compact the entire range in one shot.
  if (ioptions_.compaction_style == kCompactionStyleUniversal) {
    begin = nullptr;
    end = nullptr;
  }

  vstorage->GetOverlappingInputs(input_level, begin, end, &inputs.files);
  if (inputs.empty()) {
    return nullptr;
  }

  if ((input_level == 0) && (!level0_compactions_in_progress_.empty())) {
    // Only one level 0 compaction allowed
    TEST_SYNC_POINT("CompactionPicker::CompactRange:Conflict");
    *manual_conflict = true;
    return nullptr;
  }

  // Avoid compacting too much in one shot in case the range is large.
  // But we cannot do this for level-0 since level-0 files can overlap
  // and we must not pick one file and drop another older file if the
  // two files overlap.
  if (input_level > 0) {
    const uint64_t limit = mutable_cf_options.max_compaction_bytes;
    int hint_index = -1;
    assert(!inputs.empty());
    // Always include first file for progress.
    uint64_t input_level_total = inputs[0]->fd.GetFileSize();
    InternalKey* smallest = &(inputs[0]->smallest);
    InternalKey* largest = nullptr;
    for (size_t i = 1; i < inputs.size(); ++i) {
      // Consider whether to include inputs[i]
      largest = &inputs[i]->largest;
      uint64_t input_file_size = inputs[i]->fd.GetFileSize();
      uint64_t output_level_total = 0;
      if (output_level < vstorage->num_non_empty_levels()) {
        std::vector<FileMetaData*> files;
        vstorage->GetOverlappingInputsRangeBinarySearch(
            output_level, smallest, largest, &files, hint_index, &hint_index);
        for (const auto& file : files) {
          output_level_total += file->fd.GetFileSize();
        }
      }
      input_level_total += input_file_size;
      if (input_level_total + output_level_total > limit) {
        // To ensure compaction size is <= limit, leave out inputs from
        // index i onwards.
        covering_the_whole_range = false;
        inputs.files.resize(i);
        break;
      }
    }
  }

  assert(compact_range_options.target_path_id <
         static_cast<uint32_t>(ioptions_.cf_paths.size()));

  // for BOTTOM LEVEL compaction only, use max_file_num_to_ignore to filter out
  // files that are created during the current compaction.
  if ((compact_range_options.bottommost_level_compaction ==
           BottommostLevelCompaction::kForceOptimized ||
       compact_range_options.bottommost_level_compaction ==
           BottommostLevelCompaction::kIfHaveCompactionFilter) &&
      max_file_num_to_ignore != std::numeric_limits<uint64_t>::max()) {
    assert(input_level == output_level);
    // inputs_shrunk holds a continuous subset of input files which were all
    // created before the current manual compaction
    std::vector<FileMetaData*> inputs_shrunk;
    size_t skip_input_index = inputs.size();
    for (size_t i = 0; i < inputs.size(); ++i) {
      if (inputs[i]->fd.GetNumber() < max_file_num_to_ignore) {
        inputs_shrunk.push_back(inputs[i]);
      } else if (!inputs_shrunk.empty()) {
        // inputs[i] was created during the current manual compaction and
        // needs to be skipped
        skip_input_index = i;
        break;
      }
    }
    if (inputs_shrunk.empty()) {
      return nullptr;
    }
    if (inputs.size() != inputs_shrunk.size()) {
      inputs.files.swap(inputs_shrunk);
    }
    // set covering_the_whole_range to false if there is any file that needs to
    // be compacted in the range of inputs[skip_input_index+1, inputs.size())
    for (size_t i = skip_input_index + 1; i < inputs.size(); ++i) {
      if (inputs[i]->fd.GetNumber() < max_file_num_to_ignore) {
        covering_the_whole_range = false;
      }
    }
  }

  InternalKey key_storage;
  InternalKey* next_smallest = &key_storage;
  if (ExpandInputsToCleanCut(cf_name, vstorage, &inputs, &next_smallest) ==
      false) {
    // manual compaction is now multi-threaded, so it can
    // happen that ExpandInputsToCleanCut fails
    // we handle it higher in RunManualCompaction
    *manual_conflict = true;
    return nullptr;
  }

  if (covering_the_whole_range || !next_smallest) {
    *compaction_end = nullptr;
  } else {
    **compaction_end = *next_smallest;
  }

  CompactionInputFiles output_level_inputs;
  if (output_level == ColumnFamilyData::kCompactToBaseLevel) {
    assert(input_level == 0);
    output_level = vstorage->base_level();
    assert(output_level > 0);
  }

  for (int i = input_level + 1; i < output_level; i++) {
    assert(vstorage->NumLevelFiles(i) == 0);
  }

  output_level_inputs.level = output_level;
  if (input_level != output_level) {
    int parent_index = -1;
    if (!SetupOtherInputs(cf_name, mutable_cf_options, vstorage, &inputs,
                          &output_level_inputs, &parent_index, -1)) {
      // manual compaction is now multi-threaded, so it can
      // happen that SetupOtherInputs fails
      // we handle it higher in RunManualCompaction
      *manual_conflict = true;
      return nullptr;
    }
  }

  std::vector<CompactionInputFiles> compaction_inputs({inputs});
  if (!output_level_inputs.empty()) {
    compaction_inputs.push_back(output_level_inputs);
  }
  for (size_t i = 0; i < compaction_inputs.size(); i++) {
    if (AreFilesInCompaction(compaction_inputs[i].files)) {
      *manual_conflict = true;
      return nullptr;
    }
  }

  // 2 non-exclusive manual compactions could run at the same time producing
  // overlapping outputs in the same level.
  if (FilesRangeOverlapWithCompaction(
          compaction_inputs, output_level,
          Compaction::EvaluateProximalLevel(vstorage, mutable_cf_options,
                                            ioptions_, input_level,
                                            output_level))) {
    // This compaction output could potentially conflict with the output
    // of a currently running compaction, we cannot run it.
    *manual_conflict = true;
    return nullptr;
  }

  std::vector<FileMetaData*> grandparents;
  GetGrandparents(vstorage, inputs, output_level_inputs, &grandparents);
  Compaction* compaction = new Compaction(
      vstorage, ioptions_, mutable_cf_options, mutable_db_options,
      std::move(compaction_inputs), output_level,
      MaxFileSizeForLevel(mutable_cf_options, output_level,
                          ioptions_.compaction_style, vstorage->base_level(),
                          ioptions_.level_compaction_dynamic_level_bytes),
      mutable_cf_options.max_compaction_bytes,
      compact_range_options.target_path_id,
      GetCompressionType(vstorage, mutable_cf_options, output_level,
                         vstorage->base_level()),
      GetCompressionOptions(mutable_cf_options, vstorage, output_level),
      Temperature::kUnknown, compact_range_options.max_subcompactions,
      std::move(grandparents),
      /* earliest_snapshot */ std::nullopt, /* snapshot_checker */ nullptr,
      CompactionReason::kManualCompaction, trim_ts, /* score */ -1,
      /* l0_files_might_overlap */ true,
      compact_range_options.blob_garbage_collection_policy,
      compact_range_options.blob_garbage_collection_age_cutoff);

  TEST_SYNC_POINT_CALLBACK("CompactionPicker::CompactRange:Return", compaction);
  RegisterCompaction(compaction);

  // Creating a compaction influences the compaction score because the score
  // takes running compactions into account (by skipping files that are already
  // being compacted). Since we just changed the compaction score, we
  // recalculate it here.
  vstorage->ComputeCompactionScore(ioptions_, mutable_cf_options);

  return compaction;
}

namespace {
// Test whether two files have overlapping key-ranges.
bool HaveOverlappingKeyRanges(const Comparator* c, const SstFileMetaData& a,
                              const SstFileMetaData& b) {
  if (c->CompareWithoutTimestamp(a.smallestkey, b.smallestkey) >= 0) {
    if (c->CompareWithoutTimestamp(a.smallestkey, b.largestkey) <= 0) {
      // b.smallestkey <= a.smallestkey <= b.largestkey
      return true;
    }
  } else if (c->CompareWithoutTimestamp(a.largestkey, b.smallestkey) >= 0) {
    // a.smallestkey < b.smallestkey <= a.largestkey
    return true;
  }
  if (c->CompareWithoutTimestamp(a.largestkey, b.largestkey) <= 0) {
    if (c->CompareWithoutTimestamp(a.largestkey, b.smallestkey) >= 0) {
      // b.smallestkey <= a.largestkey <= b.largestkey
      return true;
    }
  } else if (c->CompareWithoutTimestamp(a.smallestkey, b.largestkey) <= 0) {
    // a.smallestkey <= b.largestkey < a.largestkey
    return true;
  }
  return false;
}
}  // namespace

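// Grow a user-supplied set of input file numbers until it can be compacted
// safely: on each level the chosen files are extended to a clean cut, and
// every file on lower levels (down to output_level) whose key range overlaps
// the accumulated range is pulled in as well. Fails if any required file is
// already being compacted.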
Status CompactionPicker::SanitizeCompactionInputFilesForAllLevels(
    std::unordered_set<uint64_t>* input_files,
    const ColumnFamilyMetaData& cf_meta, const int output_level) const {
  auto& levels = cf_meta.levels;
  auto comparator = icmp_->user_comparator();

  // TODO(yhchiang): add is_adjustable to CompactionOptions

  // the smallest and largest key of the current compaction input
  std::string smallestkey;
  std::string largestkey;
  // a flag for initializing smallest and largest key
  bool is_first = false;
  const int kNotFound = -1;

  // For each level, it does the following things:
  // 1. Find the first and the last compaction input files
  //    in the current level.
  // 2. Include all files between the first and the last
  //    compaction input files.
  // 3. Update the compaction key-range.
  // 4. For all remaining levels, include files that have
  //    overlapping key-range with the compaction key-range.
  for (int l = 0; l <= output_level; ++l) {
    auto& current_files = levels[l].files;
    int first_included = static_cast<int>(current_files.size());
    int last_included = kNotFound;

    // identify the first and the last compaction input files
    // in the current level.
    for (size_t f = 0; f < current_files.size(); ++f) {
      const uint64_t file_number = TableFileNameToNumber(current_files[f].name);
      if (input_files->find(file_number) == input_files->end()) {
        continue;
      }
      first_included = std::min(first_included, static_cast<int>(f));
      last_included = std::max(last_included, static_cast<int>(f));
      if (is_first == false) {
        smallestkey = current_files[f].smallestkey;
        largestkey = current_files[f].largestkey;
        is_first = true;
      }
    }
    if (last_included == kNotFound) {
      continue;
    }

    if (l != 0) {
      // expand the compaction input of the current level if it
      // has overlapping key-range with other non-compaction input
      // files in the same level.
      while (first_included > 0) {
        if (comparator->CompareWithoutTimestamp(
                current_files[first_included - 1].largestkey,
                current_files[first_included].smallestkey) < 0) {
          break;
        }
        first_included--;
      }

      while (last_included < static_cast<int>(current_files.size()) - 1) {
        if (comparator->CompareWithoutTimestamp(
                current_files[last_included + 1].smallestkey,
                current_files[last_included].largestkey) > 0) {
          break;
        }
        last_included++;
      }
    } else if (output_level > 0) {
      last_included = static_cast<int>(current_files.size() - 1);
    }

    // include all files between the first and the last compaction input files.
    for (int f = first_included; f <= last_included; ++f) {
      if (current_files[f].being_compacted) {
        return Status::Aborted("Necessary compaction input file " +
                               current_files[f].name +
                               " is currently being compacted.");
      }
      input_files->insert(TableFileNameToNumber(current_files[f].name));
    }

    // update smallest and largest key
    if (l == 0) {
      for (int f = first_included; f <= last_included; ++f) {
        if (comparator->CompareWithoutTimestamp(
                smallestkey, current_files[f].smallestkey) > 0) {
          smallestkey = current_files[f].smallestkey;
        }
        if (comparator->CompareWithoutTimestamp(
                largestkey, current_files[f].largestkey) < 0) {
          largestkey = current_files[f].largestkey;
        }
      }
    } else {
      if (comparator->CompareWithoutTimestamp(
              smallestkey, current_files[first_included].smallestkey) > 0) {
        smallestkey = current_files[first_included].smallestkey;
      }
      if (comparator->CompareWithoutTimestamp(
              largestkey, current_files[last_included].largestkey) < 0) {
        largestkey = current_files[last_included].largestkey;
      }
    }

    SstFileMetaData aggregated_file_meta;
    aggregated_file_meta.smallestkey = smallestkey;
    aggregated_file_meta.largestkey = largestkey;

    // For all lower levels, include all overlapping files.
    // We need to add overlapping files from the current level too because even
    // if there are no input_files in level l, we would still need to add files
    // which overlap with the range containing the input_files in levels 0 to l.
    // Level 0 doesn't need to be handled this way because files are sorted by
    // time and not by key.
    for (int m = std::max(l, 1); m <= output_level; ++m) {
      for (auto& next_lv_file : levels[m].files) {
        if (HaveOverlappingKeyRanges(comparator, aggregated_file_meta,
                                     next_lv_file)) {
          if (next_lv_file.being_compacted) {
            return Status::Aborted(
                "File " + next_lv_file.name +
                " that has an overlapping key range with one of the compaction "
                "input files is currently being compacted.");
          }
          input_files->insert(TableFileNameToNumber(next_lv_file.name));
        }
      }
    }
  }
  return Status::OK();
}

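// Validate a user-provided set of input file numbers and an output level
// against the current version, expand the set via
// SanitizeCompactionInputFilesForAllLevels(), and convert the result into
// per-level CompactionInputFiles. Fails if the result would clash with a
// running compaction on the output level.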
Status CompactionPicker::SanitizeAndConvertCompactionInputFiles(
    std::unordered_set<uint64_t>* input_files, const int output_level,
    Version* version,
    std::vector<CompactionInputFiles>* converted_input_files) const {
  ColumnFamilyMetaData cf_meta;
  version->GetColumnFamilyMetaData(&cf_meta);
  assert(static_cast<int>(cf_meta.levels.size()) - 1 ==
         cf_meta.levels[cf_meta.levels.size() - 1].level);
  assert(converted_input_files);
  if (output_level >= static_cast<int>(cf_meta.levels.size())) {
    return Status::InvalidArgument(
        "Output level for column family " + cf_meta.name +
        " must be between [0, " +
        std::to_string(cf_meta.levels[cf_meta.levels.size() - 1].level) + "].");
  }

  if (output_level > MaxOutputLevel()) {
    return Status::InvalidArgument(
        "Exceed the maximum output level defined by "
        "the current compaction algorithm --- " +
        std::to_string(MaxOutputLevel()));
  }

  if (output_level < 0) {
    return Status::InvalidArgument("Output level cannot be negative.");
  }

  if (input_files->size() == 0) {
    return Status::InvalidArgument(
        "A compaction must contain at least one file.");
  }

  Status s = SanitizeCompactionInputFilesForAllLevels(input_files, cf_meta,
                                                      output_level);
  if (!s.ok()) {
    return s;
  }

  // for all input files, check whether the file number matches
  // any currently-existing files.
  for (auto file_num : *input_files) {
    bool found = false;
    int input_file_level = -1;
    for (const auto& level_meta : cf_meta.levels) {
      for (const auto& file_meta : level_meta.files) {
        if (file_num == TableFileNameToNumber(file_meta.name)) {
          if (file_meta.being_compacted) {
            return Status::Aborted("Specified compaction input file " +
                                   MakeTableFileName("", file_num) +
                                   " is already being compacted.");
          }
          found = true;
          input_file_level = level_meta.level;
          break;
        }
      }
      if (found) {
        break;
      }
    }
    if (!found) {
      return Status::InvalidArgument(
          "Specified compaction input file " + MakeTableFileName("", file_num) +
          " does not exist in column family " + cf_meta.name + ".");
    }
    if (input_file_level > output_level) {
      return Status::InvalidArgument(
          "Cannot compact file to an upper level, input file: " +
          MakeTableFileName("", file_num) + " level " +
          std::to_string(input_file_level) + " > output level " +
          std::to_string(output_level));
    }
  }

  s = GetCompactionInputsFromFileNumbers(converted_input_files, input_files,
                                         version->storage_info(),
                                         CompactionOptions());
  if (!s.ok()) {
    return s;
  }
  assert(converted_input_files->size() > 0);
  if (output_level != 0 &&
      FilesRangeOverlapWithCompaction(
          *converted_input_files, output_level,
          Compaction::EvaluateProximalLevel(
              version->storage_info(), version->GetMutableCFOptions(),
              ioptions_, (*converted_input_files)[0].level, output_level))) {
    return Status::Aborted(
        "A running compaction is writing to the same output level(s) in an "
        "overlapping key range");
  }
  return Status::OK();
}

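// Record a picked compaction as running so that later picks can detect
// conflicts. Compactions that read from level 0 (and all universal
// compactions) are also tracked in level0_compactions_in_progress_, since
// only one level-0 compaction is allowed at a time.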
void CompactionPicker::RegisterCompaction(Compaction* c) {
  if (c == nullptr) {
    return;
  }
  assert(ioptions_.compaction_style != kCompactionStyleLevel ||
         c->output_level() == 0 ||
         !FilesRangeOverlapWithCompaction(*c->inputs(), c->output_level(),
                                          c->GetProximalLevel()));
  // CompactionReason::kExternalSstIngestion's start level is just a placeholder
  // number without actual meaning as file ingestion technically does not have
  // an input level like other compactions
  if ((c->start_level() == 0 &&
       c->compaction_reason() != CompactionReason::kExternalSstIngestion) ||
      ioptions_.compaction_style == kCompactionStyleUniversal) {
    level0_compactions_in_progress_.insert(c);
  }
  compactions_in_progress_.insert(c);
  TEST_SYNC_POINT_CALLBACK("CompactionPicker::RegisterCompaction:Registered",
                           c);
}

void CompactionPicker::UnregisterCompaction(Compaction* c) {
  if (c == nullptr) {
    return;
  }
  if (c->start_level() == 0 ||
      ioptions_.compaction_style == kCompactionStyleUniversal) {
    level0_compactions_in_progress_.erase(c);
  }
  compactions_in_progress_.erase(c);
}

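// Try to seed a compaction from the files marked for compaction: first a
// randomly chosen marked file, then every marked file in order, until one can
// be expanded to a clean cut without conflicting with a running level-0
// compaction. Leaves start_level_inputs empty if nothing suitable is found.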
void CompactionPicker::PickFilesMarkedForCompaction(
    const std::string& cf_name, VersionStorageInfo* vstorage, int* start_level,
    int* output_level, CompactionInputFiles* start_level_inputs,
    std::function<bool(const FileMetaData*)> skip_marked_file) {
  if (vstorage->FilesMarkedForCompaction().empty()) {
    return;
  }

  auto continuation = [&, cf_name](std::pair<int, FileMetaData*> level_file) {
    // If it's being compacted it has nothing to do here.
    // If this assert() fails that means that some function marked some
    // files as being_compacted, but didn't call ComputeCompactionScore()
    assert(!level_file.second->being_compacted);
    if (skip_marked_file(level_file.second)) {
      return false;
    }
    *start_level = level_file.first;
    *output_level =
        (*start_level == 0) ? vstorage->base_level() : *start_level + 1;

    if (*start_level == 0 && !level0_compactions_in_progress()->empty()) {
      return false;
    }

    start_level_inputs->files = {level_file.second};
    start_level_inputs->level = *start_level;
    return ExpandInputsToCleanCut(cf_name, vstorage, start_level_inputs);
  };

  // take a chance on a random file first
  Random64 rnd(/* seed */ reinterpret_cast<uint64_t>(vstorage));
  size_t random_file_index = static_cast<size_t>(rnd.Uniform(
      static_cast<uint64_t>(vstorage->FilesMarkedForCompaction().size())));
  TEST_SYNC_POINT_CALLBACK("CompactionPicker::PickFilesMarkedForCompaction",
                           &random_file_index);

  if (continuation(vstorage->FilesMarkedForCompaction()[random_file_index])) {
    // found the compaction!
    return;
  }

  for (auto& level_file : vstorage->FilesMarkedForCompaction()) {
    if (continuation(level_file)) {
      // found the compaction!
      return;
    }
  }
  start_level_inputs->files.clear();
}

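// Replace the initially picked L0 file(s) in start_level_inputs with the full
// set of overlapping L0 files for the same key range. Returns false if the
// resulting range overlaps files on output_level that are already being
// compacted.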
bool CompactionPicker::GetOverlappingL0Files(
    VersionStorageInfo* vstorage, CompactionInputFiles* start_level_inputs,
    int output_level, int* parent_index, const FileMetaData* starting_l0_file) {
  // Two level-0 compactions won't run at the same time, so we don't need to
  // worry about level-0 files being compacted.
  assert(level0_compactions_in_progress()->empty());
  InternalKey smallest, largest;
  GetRange(*start_level_inputs, &smallest, &largest);
  // Note that the next call will discard the file we placed in
  // c->inputs_[0] earlier and replace it with an overlapping set
  // which will include the picked file.
  start_level_inputs->files.clear();
  vstorage->GetOverlappingInputs(0, &smallest, &largest,
                                 &(start_level_inputs->files),
                                 /*hint_index=*/-1,
                                 /*file_index=*/nullptr,
                                 /*expand_range=*/true,
                                 /*starting_l0_file=*/starting_l0_file);

  // If we include more L0 files in the same compaction run it can
  // cause the 'smallest' and 'largest' key to get extended to a
  // larger range. So, re-invoke GetRange to get the new key range
  GetRange(*start_level_inputs, &smallest, &largest);
  if (IsRangeInCompaction(vstorage, &smallest, &largest, output_level,
                          parent_index)) {
    return false;
  }
  assert(!start_level_inputs->files.empty());

  return true;
}

}  // namespace ROCKSDB_NAMESPACE