compaction_picker.cc

  1. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
  2. // This source code is licensed under both the GPLv2 (found in the
  3. // COPYING file in the root directory) and Apache 2.0 License
  4. // (found in the LICENSE.Apache file in the root directory).
  5. //
  6. // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
  7. // Use of this source code is governed by a BSD-style license that can be
  8. // found in the LICENSE file. See the AUTHORS file for names of contributors.
  9. #include "db/compaction/compaction_picker.h"
  10. #include <cinttypes>
  11. #include <limits>
  12. #include <queue>
  13. #include <string>
  14. #include <utility>
  15. #include <vector>
  16. #include "db/column_family.h"
  17. #include "file/filename.h"
  18. #include "logging/log_buffer.h"
  19. #include "monitoring/statistics.h"
  20. #include "test_util/sync_point.h"
  21. #include "util/random.h"
  22. #include "util/string_util.h"
  23. namespace ROCKSDB_NAMESPACE {
  24. namespace {
  25. uint64_t TotalCompensatedFileSize(const std::vector<FileMetaData*>& files) {
  26. uint64_t sum = 0;
  27. for (size_t i = 0; i < files.size() && files[i]; i++) {
  28. sum += files[i]->compensated_file_size;
  29. }
  30. return sum;
  31. }
  32. } // anonymous namespace
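// FindIntraL0Compaction: starting from the newest L0 file whose data no longer
// overlaps an unflushed memtable, greedily pull in consecutive L0 files while
// the compaction-bytes-per-deleted-file does not increase and the compensated
// size stays under max_compaction_bytes. Succeeds (and fills *comp_inputs)
// only if at least min_files_to_compact files qualify and the resulting
// bytes-per-deleted-file stays below max_compact_bytes_per_del_file.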
  33. bool FindIntraL0Compaction(const std::vector<FileMetaData*>& level_files,
  34. size_t min_files_to_compact,
  35. uint64_t max_compact_bytes_per_del_file,
  36. uint64_t max_compaction_bytes,
  37. CompactionInputFiles* comp_inputs,
  38. SequenceNumber earliest_mem_seqno) {
  39. // Do not pick an ingested file when there is at least one unflushed memtable
  40. // whose seqno range overlaps with the sst.
  41. TEST_SYNC_POINT("FindIntraL0Compaction");
  42. size_t start = 0;
  43. for (; start < level_files.size(); start++) {
  44. if (level_files[start]->being_compacted) {
  45. return false;
  46. }
  47. // If there is no data in the memtable, the earliest sequence number would be the
  48. // largest sequence number in the last memtable.
  49. // Because all files are sorted in descending order by largest_seqno, we
  50. // only need to check the first one.
  51. if (level_files[start]->fd.largest_seqno <= earliest_mem_seqno) {
  52. break;
  53. }
  54. }
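// At this point `start` indexes the newest L0 file whose largest_seqno is
// already covered by flushed data; any newer files were skipped above.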
  55. if (start >= level_files.size()) {
  56. return false;
  57. }
  58. size_t compact_bytes = static_cast<size_t>(level_files[start]->fd.file_size);
  59. uint64_t compensated_compact_bytes =
  60. level_files[start]->compensated_file_size;
  61. size_t compact_bytes_per_del_file = port::kMaxSizet;
  62. // Compaction range will be [start, limit).
  63. size_t limit;
  64. // Pull in files until the amount of compaction work per deleted file begins
  65. // increasing or maximum total compaction size is reached.
  66. size_t new_compact_bytes_per_del_file = 0;
  67. for (limit = start + 1; limit < level_files.size(); ++limit) {
  68. compact_bytes += static_cast<size_t>(level_files[limit]->fd.file_size);
  69. compensated_compact_bytes += level_files[limit]->compensated_file_size;
  70. new_compact_bytes_per_del_file = compact_bytes / (limit - start);
  71. if (level_files[limit]->being_compacted ||
  72. new_compact_bytes_per_del_file > compact_bytes_per_del_file ||
  73. compensated_compact_bytes > max_compaction_bytes) {
  74. break;
  75. }
  76. compact_bytes_per_del_file = new_compact_bytes_per_del_file;
  77. }
  78. if ((limit - start) >= min_files_to_compact &&
  79. compact_bytes_per_del_file < max_compact_bytes_per_del_file) {
  80. assert(comp_inputs != nullptr);
  81. comp_inputs->level = 0;
  82. for (size_t i = start; i < limit; ++i) {
  83. comp_inputs->files.push_back(level_files[i]);
  84. }
  85. return true;
  86. }
  87. return false;
  88. }
  89. // Determine compression type, based on user options, level of the output
  90. // file and whether compression is disabled.
  91. // If enable_compression is false, then compression is always disabled no
  92. // matter what the values of the other two parameters are.
  93. // Otherwise, the compression type is determined based on options and level.
  94. CompressionType GetCompressionType(const ImmutableCFOptions& ioptions,
  95. const VersionStorageInfo* vstorage,
  96. const MutableCFOptions& mutable_cf_options,
  97. int level, int base_level,
  98. const bool enable_compression) {
  99. if (!enable_compression) {
  100. // disable compression
  101. return kNoCompression;
  102. }
  103. // If bottommost_compression is set and we are compacting to the
  104. // bottommost level then we should use it.
  105. if (ioptions.bottommost_compression != kDisableCompressionOption &&
  106. level >= (vstorage->num_non_empty_levels() - 1)) {
  107. return ioptions.bottommost_compression;
  108. }
  109. // If the user has specified a different compression type for each level,
  110. // then pick the compression type for that level.
  111. if (!ioptions.compression_per_level.empty()) {
  112. assert(level == 0 || level >= base_level);
  113. int idx = (level == 0) ? 0 : level - base_level + 1;
  114. const int n = static_cast<int>(ioptions.compression_per_level.size()) - 1;
  115. // It is possible for level to be -1; in that case, we use level
  116. // 0's compression. This occurs mostly in backwards compatibility
  117. // situations when the builder doesn't know what level the file
  118. // belongs to. Likewise, if level is beyond the end of the
  119. // specified compression levels, use the last value.
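// For illustration (hypothetical settings): with base_level == 2 and
// compression_per_level = {kNoCompression, kSnappyCompression, kZSTD},
// level 0 maps to index 0, level 2 to index 1, level 3 to index 2, and any
// deeper level is clamped to index 2 (the last entry).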
  120. return ioptions.compression_per_level[std::max(0, std::min(idx, n))];
  121. } else {
  122. return mutable_cf_options.compression;
  123. }
  124. }
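// Returns the CompressionOptions to use for the given output level. The
// bottommost-specific options apply only when bottommost_compression is set,
// bottommost_compression_opts is explicitly enabled, and the output is the
// bottommost non-empty level.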
  125. CompressionOptions GetCompressionOptions(const ImmutableCFOptions& ioptions,
  126. const VersionStorageInfo* vstorage,
  127. int level,
  128. const bool enable_compression) {
  129. if (!enable_compression) {
  130. return ioptions.compression_opts;
  131. }
  132. // If bottommost_compression is set and we are compacting to the
  133. // bottommost level then we should use the specified compression options
  134. // for the bottommost_compression.
  135. if (ioptions.bottommost_compression != kDisableCompressionOption &&
  136. level >= (vstorage->num_non_empty_levels() - 1) &&
  137. ioptions.bottommost_compression_opts.enabled) {
  138. return ioptions.bottommost_compression_opts;
  139. }
  140. return ioptions.compression_opts;
  141. }
  142. CompactionPicker::CompactionPicker(const ImmutableCFOptions& ioptions,
  143. const InternalKeyComparator* icmp)
  144. : ioptions_(ioptions), icmp_(icmp) {}
  145. CompactionPicker::~CompactionPicker() {}
  146. // Delete this compaction from the list of running compactions.
  147. void CompactionPicker::ReleaseCompactionFiles(Compaction* c, Status status) {
  148. UnregisterCompaction(c);
  149. if (!status.ok()) {
  150. c->ResetNextCompactionIndex();
  151. }
  152. }
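// Computes the smallest and largest internal keys covered by `inputs`.
// L0 files may overlap arbitrarily, so every file is examined; for L1+ the
// files are sorted and non-overlapping, so only the first and last files
// need to be consulted.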
  153. void CompactionPicker::GetRange(const CompactionInputFiles& inputs,
  154. InternalKey* smallest,
  155. InternalKey* largest) const {
  156. const int level = inputs.level;
  157. assert(!inputs.empty());
  158. smallest->Clear();
  159. largest->Clear();
  160. if (level == 0) {
  161. for (size_t i = 0; i < inputs.size(); i++) {
  162. FileMetaData* f = inputs[i];
  163. if (i == 0) {
  164. *smallest = f->smallest;
  165. *largest = f->largest;
  166. } else {
  167. if (icmp_->Compare(f->smallest, *smallest) < 0) {
  168. *smallest = f->smallest;
  169. }
  170. if (icmp_->Compare(f->largest, *largest) > 0) {
  171. *largest = f->largest;
  172. }
  173. }
  174. }
  175. } else {
  176. *smallest = inputs[0]->smallest;
  177. *largest = inputs[inputs.size() - 1]->largest;
  178. }
  179. }
  180. void CompactionPicker::GetRange(const CompactionInputFiles& inputs1,
  181. const CompactionInputFiles& inputs2,
  182. InternalKey* smallest,
  183. InternalKey* largest) const {
  184. assert(!inputs1.empty() || !inputs2.empty());
  185. if (inputs1.empty()) {
  186. GetRange(inputs2, smallest, largest);
  187. } else if (inputs2.empty()) {
  188. GetRange(inputs1, smallest, largest);
  189. } else {
  190. InternalKey smallest1, smallest2, largest1, largest2;
  191. GetRange(inputs1, &smallest1, &largest1);
  192. GetRange(inputs2, &smallest2, &largest2);
  193. *smallest =
  194. icmp_->Compare(smallest1, smallest2) < 0 ? smallest1 : smallest2;
  195. *largest = icmp_->Compare(largest1, largest2) < 0 ? largest2 : largest1;
  196. }
  197. }
  198. void CompactionPicker::GetRange(const std::vector<CompactionInputFiles>& inputs,
  199. InternalKey* smallest,
  200. InternalKey* largest) const {
  201. InternalKey current_smallest;
  202. InternalKey current_largest;
  203. bool initialized = false;
  204. for (const auto& in : inputs) {
  205. if (in.empty()) {
  206. continue;
  207. }
  208. GetRange(in, &current_smallest, &current_largest);
  209. if (!initialized) {
  210. *smallest = current_smallest;
  211. *largest = current_largest;
  212. initialized = true;
  213. } else {
  214. if (icmp_->Compare(current_smallest, *smallest) < 0) {
  215. *smallest = current_smallest;
  216. }
  217. if (icmp_->Compare(current_largest, *largest) > 0) {
  218. *largest = current_largest;
  219. }
  220. }
  221. }
  222. assert(initialized);
  223. }
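// Repeatedly widens `inputs` with every file on the same level that overlaps
// the current key range, so that no user key is split between an input file
// and a file left outside the compaction. Fails if the expanded set touches a
// file that is already being compacted.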
  224. bool CompactionPicker::ExpandInputsToCleanCut(const std::string& /*cf_name*/,
  225. VersionStorageInfo* vstorage,
  226. CompactionInputFiles* inputs,
  227. InternalKey** next_smallest) {
  228. // An empty set of inputs would not make a valid compaction.
  229. assert(!inputs->empty());
  230. const int level = inputs->level;
  231. // GetOverlappingInputs will always do the right thing for level-0.
  232. // So we don't need to do any expansion if level == 0.
  233. if (level == 0) {
  234. return true;
  235. }
  236. InternalKey smallest, largest;
  237. // Keep expanding inputs until we are sure that there is a "clean cut"
  238. // boundary between the files in input and the surrounding files.
  239. // This will ensure that no parts of a key are lost during compaction.
  240. int hint_index = -1;
  241. size_t old_size;
  242. do {
  243. old_size = inputs->size();
  244. GetRange(*inputs, &smallest, &largest);
  245. inputs->clear();
  246. vstorage->GetOverlappingInputs(level, &smallest, &largest, &inputs->files,
  247. hint_index, &hint_index, true,
  248. next_smallest);
  249. } while (inputs->size() > old_size);
  250. // we started off with inputs non-empty and the previous loop only grew
  251. // inputs. thus, inputs should be non-empty here
  252. assert(!inputs->empty());
  253. // If, after the expansion, there are files that are already under
  254. // compaction, then we must drop/cancel this compaction.
  255. if (AreFilesInCompaction(inputs->files)) {
  256. return false;
  257. }
  258. return true;
  259. }
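// Returns true if the user-key range [smallest_user_key, largest_user_key]
// intersects the output key range of any compaction currently registered as
// writing to `level`.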
  260. bool CompactionPicker::RangeOverlapWithCompaction(
  261. const Slice& smallest_user_key, const Slice& largest_user_key,
  262. int level) const {
  263. const Comparator* ucmp = icmp_->user_comparator();
  264. for (Compaction* c : compactions_in_progress_) {
  265. if (c->output_level() == level &&
  266. ucmp->Compare(smallest_user_key, c->GetLargestUserKey()) <= 0 &&
  267. ucmp->Compare(largest_user_key, c->GetSmallestUserKey()) >= 0) {
  268. // Overlap
  269. return true;
  270. }
  271. }
  272. // Did not overlap with any running compaction in level `level`
  273. return false;
  274. }
  275. bool CompactionPicker::FilesRangeOverlapWithCompaction(
  276. const std::vector<CompactionInputFiles>& inputs, int level) const {
  277. bool is_empty = true;
  278. for (auto& in : inputs) {
  279. if (!in.empty()) {
  280. is_empty = false;
  281. break;
  282. }
  283. }
  284. if (is_empty) {
  285. // No files in inputs
  286. return false;
  287. }
  288. InternalKey smallest, largest;
  289. GetRange(inputs, &smallest, &largest);
  290. return RangeOverlapWithCompaction(smallest.user_key(), largest.user_key(),
  291. level);
  292. }
  293. // Returns true if any one of the specified files is being compacted
  294. bool CompactionPicker::AreFilesInCompaction(
  295. const std::vector<FileMetaData*>& files) {
  296. for (size_t i = 0; i < files.size(); i++) {
  297. if (files[i]->being_compacted) {
  298. return true;
  299. }
  300. }
  301. return false;
  302. }
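// Builds a Compaction object from an explicitly supplied set of input files
// (the CompactFiles() path). The caller is expected to have sanitized the
// file list already; compression falls back to the level-based choice when
// CompactionOptions does not specify one.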
  303. Compaction* CompactionPicker::CompactFiles(
  304. const CompactionOptions& compact_options,
  305. const std::vector<CompactionInputFiles>& input_files, int output_level,
  306. VersionStorageInfo* vstorage, const MutableCFOptions& mutable_cf_options,
  307. uint32_t output_path_id) {
  308. assert(input_files.size());
  309. // This compaction output should not overlap with a running compaction as
  310. // `SanitizeCompactionInputFiles` should've checked earlier and db mutex
  311. // shouldn't have been released since.
  312. assert(!FilesRangeOverlapWithCompaction(input_files, output_level));
  313. CompressionType compression_type;
  314. if (compact_options.compression == kDisableCompressionOption) {
  315. int base_level;
  316. if (ioptions_.compaction_style == kCompactionStyleLevel) {
  317. base_level = vstorage->base_level();
  318. } else {
  319. base_level = 1;
  320. }
  321. compression_type =
  322. GetCompressionType(ioptions_, vstorage, mutable_cf_options,
  323. output_level, base_level);
  324. } else {
  325. // TODO(ajkr): `CompactionOptions` offers configurable `CompressionType`
  326. // without configurable `CompressionOptions`, which is inconsistent.
  327. compression_type = compact_options.compression;
  328. }
  329. auto c = new Compaction(
  330. vstorage, ioptions_, mutable_cf_options, input_files, output_level,
  331. compact_options.output_file_size_limit,
  332. mutable_cf_options.max_compaction_bytes, output_path_id, compression_type,
  333. GetCompressionOptions(ioptions_, vstorage, output_level),
  334. compact_options.max_subcompactions,
  335. /* grandparents */ {}, true);
  336. RegisterCompaction(c);
  337. return c;
  338. }
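// Translates a set of table file numbers into per-level CompactionInputFiles
// by scanning every level of `vstorage`; returns InvalidArgument if any
// requested file number cannot be found.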
  339. Status CompactionPicker::GetCompactionInputsFromFileNumbers(
  340. std::vector<CompactionInputFiles>* input_files,
  341. std::unordered_set<uint64_t>* input_set, const VersionStorageInfo* vstorage,
  342. const CompactionOptions& /*compact_options*/) const {
  343. if (input_set->size() == 0U) {
  344. return Status::InvalidArgument(
  345. "Compaction must include at least one file.");
  346. }
  347. assert(input_files);
  348. std::vector<CompactionInputFiles> matched_input_files;
  349. matched_input_files.resize(vstorage->num_levels());
  350. int first_non_empty_level = -1;
  351. int last_non_empty_level = -1;
  352. // TODO(yhchiang): use a lazy-initialized mapping from
  353. // file_number to FileMetaData in Version.
  354. for (int level = 0; level < vstorage->num_levels(); ++level) {
  355. for (auto file : vstorage->LevelFiles(level)) {
  356. auto iter = input_set->find(file->fd.GetNumber());
  357. if (iter != input_set->end()) {
  358. matched_input_files[level].files.push_back(file);
  359. input_set->erase(iter);
  360. last_non_empty_level = level;
  361. if (first_non_empty_level == -1) {
  362. first_non_empty_level = level;
  363. }
  364. }
  365. }
  366. }
  367. if (!input_set->empty()) {
  368. std::string message(
  369. "Cannot find matched SST files for the following file numbers:");
  370. for (auto fn : *input_set) {
  371. message += " ";
  372. message += ToString(fn);
  373. }
  374. return Status::InvalidArgument(message);
  375. }
  376. for (int level = first_non_empty_level; level <= last_non_empty_level;
  377. ++level) {
  378. matched_input_files[level].level = level;
  379. input_files->emplace_back(std::move(matched_input_files[level]));
  380. }
  381. return Status::OK();
  382. }
  383. // Returns true if any one of the parent files is being compacted
  384. bool CompactionPicker::IsRangeInCompaction(VersionStorageInfo* vstorage,
  385. const InternalKey* smallest,
  386. const InternalKey* largest,
  387. int level, int* level_index) {
  388. std::vector<FileMetaData*> inputs;
  389. assert(level < NumberLevels());
  390. vstorage->GetOverlappingInputs(level, smallest, largest, &inputs,
  391. level_index ? *level_index : 0, level_index);
  392. return AreFilesInCompaction(inputs);
  393. }
  394. // Populates the set of inputs of all other levels that overlap with the
  395. // start level.
  396. // For now we assume all levels except the start level and output level are empty.
  397. // Will also attempt to expand the "start level" inputs if that doesn't expand
  398. // the "output level" inputs or cause the start level to include a file that has an
  399. // overlapping user-key with another file.
  400. // REQUIRES: input_level and output_level are different
  401. // REQUIRES: inputs->empty() == false
  402. // Returns false if files on parent level are currently in compaction, which
  403. // means that we can't compact them
  404. bool CompactionPicker::SetupOtherInputs(
  405. const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
  406. VersionStorageInfo* vstorage, CompactionInputFiles* inputs,
  407. CompactionInputFiles* output_level_inputs, int* parent_index,
  408. int base_index) {
  409. assert(!inputs->empty());
  410. assert(output_level_inputs->empty());
  411. const int input_level = inputs->level;
  412. const int output_level = output_level_inputs->level;
  413. if (input_level == output_level) {
  414. // no possibility of conflict
  415. return true;
  416. }
  417. // For now, we only support merging two levels, start level and output level.
  418. // We need to assert other levels are empty.
  419. for (int l = input_level + 1; l < output_level; l++) {
  420. assert(vstorage->NumLevelFiles(l) == 0);
  421. }
  422. InternalKey smallest, largest;
  423. // Get the range one last time.
  424. GetRange(*inputs, &smallest, &largest);
  425. // Populate the set of next-level files (output_level_inputs) to
  426. // include in the compaction.
  427. vstorage->GetOverlappingInputs(output_level, &smallest, &largest,
  428. &output_level_inputs->files, *parent_index,
  429. parent_index);
  430. if (AreFilesInCompaction(output_level_inputs->files)) {
  431. return false;
  432. }
  433. if (!output_level_inputs->empty()) {
  434. if (!ExpandInputsToCleanCut(cf_name, vstorage, output_level_inputs)) {
  435. return false;
  436. }
  437. }
  438. // See if we can further grow the number of inputs in "level" without
  439. // changing the number of "level+1" files we pick up. We also choose NOT
  440. // to expand if this would cause "level" to include some entries for some
  441. // user key, while excluding other entries for the same user key. This
  442. // can happen when one user key spans multiple files.
  443. if (!output_level_inputs->empty()) {
  444. const uint64_t limit = mutable_cf_options.max_compaction_bytes;
  445. const uint64_t output_level_inputs_size =
  446. TotalCompensatedFileSize(output_level_inputs->files);
  447. const uint64_t inputs_size = TotalCompensatedFileSize(inputs->files);
  448. bool expand_inputs = false;
  449. CompactionInputFiles expanded_inputs;
  450. expanded_inputs.level = input_level;
  451. // Get closed interval of output level
  452. InternalKey all_start, all_limit;
  453. GetRange(*inputs, *output_level_inputs, &all_start, &all_limit);
  454. bool try_overlapping_inputs = true;
  455. vstorage->GetOverlappingInputs(input_level, &all_start, &all_limit,
  456. &expanded_inputs.files, base_index, nullptr);
  457. uint64_t expanded_inputs_size =
  458. TotalCompensatedFileSize(expanded_inputs.files);
  459. if (!ExpandInputsToCleanCut(cf_name, vstorage, &expanded_inputs)) {
  460. try_overlapping_inputs = false;
  461. }
  462. if (try_overlapping_inputs && expanded_inputs.size() > inputs->size() &&
  463. output_level_inputs_size + expanded_inputs_size < limit &&
  464. !AreFilesInCompaction(expanded_inputs.files)) {
  465. InternalKey new_start, new_limit;
  466. GetRange(expanded_inputs, &new_start, &new_limit);
  467. CompactionInputFiles expanded_output_level_inputs;
  468. expanded_output_level_inputs.level = output_level;
  469. vstorage->GetOverlappingInputs(output_level, &new_start, &new_limit,
  470. &expanded_output_level_inputs.files,
  471. *parent_index, parent_index);
  472. assert(!expanded_output_level_inputs.empty());
  473. if (!AreFilesInCompaction(expanded_output_level_inputs.files) &&
  474. ExpandInputsToCleanCut(cf_name, vstorage,
  475. &expanded_output_level_inputs) &&
  476. expanded_output_level_inputs.size() == output_level_inputs->size()) {
  477. expand_inputs = true;
  478. }
  479. }
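// If the overlapping expansion above was rejected, fall back to
// GetCleanInputsWithinInterval, which only adds start-level files that lie
// entirely inside [all_start, all_limit] and therefore cannot extend the
// compaction's key range into additional output-level files.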
  480. if (!expand_inputs) {
  481. vstorage->GetCleanInputsWithinInterval(input_level, &all_start,
  482. &all_limit, &expanded_inputs.files,
  483. base_index, nullptr);
  484. expanded_inputs_size = TotalCompensatedFileSize(expanded_inputs.files);
  485. if (expanded_inputs.size() > inputs->size() &&
  486. output_level_inputs_size + expanded_inputs_size < limit &&
  487. !AreFilesInCompaction(expanded_inputs.files)) {
  488. expand_inputs = true;
  489. }
  490. }
  491. if (expand_inputs) {
  492. ROCKS_LOG_INFO(ioptions_.info_log,
  493. "[%s] Expanding@%d %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt
  494. "(%" PRIu64 "+%" PRIu64 " bytes) to %" ROCKSDB_PRIszt
  495. "+%" ROCKSDB_PRIszt " (%" PRIu64 "+%" PRIu64 " bytes)\n",
  496. cf_name.c_str(), input_level, inputs->size(),
  497. output_level_inputs->size(), inputs_size,
  498. output_level_inputs_size, expanded_inputs.size(),
  499. output_level_inputs->size(), expanded_inputs_size,
  500. output_level_inputs_size);
  501. inputs->files = expanded_inputs.files;
  502. }
  503. }
  504. return true;
  505. }
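// Collects the files in the level just below the output level (the
// "grandparents") that overlap the compaction's key range; they are used
// elsewhere to bound how much the compaction's output files overlap with
// that level.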
  506. void CompactionPicker::GetGrandparents(
  507. VersionStorageInfo* vstorage, const CompactionInputFiles& inputs,
  508. const CompactionInputFiles& output_level_inputs,
  509. std::vector<FileMetaData*>* grandparents) {
  510. InternalKey start, limit;
  511. GetRange(inputs, output_level_inputs, &start, &limit);
  512. // Compute the set of grandparent files that overlap this compaction
  513. // (parent == level+1; grandparent == level+2)
  514. if (output_level_inputs.level + 1 < NumberLevels()) {
  515. vstorage->GetOverlappingInputs(output_level_inputs.level + 1, &start,
  516. &limit, grandparents);
  517. }
  518. }
  519. Compaction* CompactionPicker::CompactRange(
  520. const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
  521. VersionStorageInfo* vstorage, int input_level, int output_level,
  522. const CompactRangeOptions& compact_range_options, const InternalKey* begin,
  523. const InternalKey* end, InternalKey** compaction_end, bool* manual_conflict,
  524. uint64_t max_file_num_to_ignore) {
  525. // CompactionPickerFIFO has its own implementation of compact range
  526. assert(ioptions_.compaction_style != kCompactionStyleFIFO);
  527. if (input_level == ColumnFamilyData::kCompactAllLevels) {
  528. assert(ioptions_.compaction_style == kCompactionStyleUniversal);
  529. // Universal compaction with more than one level always compacts all the
  530. // files together to the last level.
  531. assert(vstorage->num_levels() > 1);
  532. // DBImpl::CompactRange() sets the output level to be the last level
  533. if (ioptions_.allow_ingest_behind) {
  534. assert(output_level == vstorage->num_levels() - 2);
  535. } else {
  536. assert(output_level == vstorage->num_levels() - 1);
  537. }
  538. // DBImpl::RunManualCompaction will use the full key range for universal compaction
  539. assert(begin == nullptr);
  540. assert(end == nullptr);
  541. *compaction_end = nullptr;
  542. int start_level = 0;
  543. for (; start_level < vstorage->num_levels() &&
  544. vstorage->NumLevelFiles(start_level) == 0;
  545. start_level++) {
  546. }
  547. if (start_level == vstorage->num_levels()) {
  548. return nullptr;
  549. }
  550. if ((start_level == 0) && (!level0_compactions_in_progress_.empty())) {
  551. *manual_conflict = true;
  552. // Only one level 0 compaction allowed
  553. return nullptr;
  554. }
  555. std::vector<CompactionInputFiles> inputs(vstorage->num_levels() -
  556. start_level);
  557. for (int level = start_level; level < vstorage->num_levels(); level++) {
  558. inputs[level - start_level].level = level;
  559. auto& files = inputs[level - start_level].files;
  560. for (FileMetaData* f : vstorage->LevelFiles(level)) {
  561. files.push_back(f);
  562. }
  563. if (AreFilesInCompaction(files)) {
  564. *manual_conflict = true;
  565. return nullptr;
  566. }
  567. }
  568. // 2 non-exclusive manual compactions could run at the same time producing
  569. // overlapping outputs in the same level.
  570. if (FilesRangeOverlapWithCompaction(inputs, output_level)) {
  571. // This compaction output could potentially conflict with the output
  572. // of a currently running compaction, we cannot run it.
  573. *manual_conflict = true;
  574. return nullptr;
  575. }
  576. Compaction* c = new Compaction(
  577. vstorage, ioptions_, mutable_cf_options, std::move(inputs),
  578. output_level,
  579. MaxFileSizeForLevel(mutable_cf_options, output_level,
  580. ioptions_.compaction_style),
  581. /* max_compaction_bytes */ LLONG_MAX,
  582. compact_range_options.target_path_id,
  583. GetCompressionType(ioptions_, vstorage, mutable_cf_options,
  584. output_level, 1),
  585. GetCompressionOptions(ioptions_, vstorage, output_level),
  586. compact_range_options.max_subcompactions, /* grandparents */ {},
  587. /* is manual */ true);
  588. RegisterCompaction(c);
  589. return c;
  590. }
  591. CompactionInputFiles inputs;
  592. inputs.level = input_level;
  593. bool covering_the_whole_range = true;
  594. // All files are 'overlapping' in universal style compaction.
  595. // We have to compact the entire range in one shot.
  596. if (ioptions_.compaction_style == kCompactionStyleUniversal) {
  597. begin = nullptr;
  598. end = nullptr;
  599. }
  600. vstorage->GetOverlappingInputs(input_level, begin, end, &inputs.files);
  601. if (inputs.empty()) {
  602. return nullptr;
  603. }
  604. if ((input_level == 0) && (!level0_compactions_in_progress_.empty())) {
  605. // Only one level 0 compaction allowed
  606. TEST_SYNC_POINT("CompactionPicker::CompactRange:Conflict");
  607. *manual_conflict = true;
  608. return nullptr;
  609. }
  610. // Avoid compacting too much in one shot in case the range is large.
  611. // But we cannot do this for level-0 since level-0 files can overlap
  612. // and we must not pick one file and drop another older file if the
  613. // two files overlap.
  614. if (input_level > 0) {
  615. const uint64_t limit = mutable_cf_options.max_compaction_bytes;
  616. uint64_t total = 0;
  617. for (size_t i = 0; i + 1 < inputs.size(); ++i) {
  618. uint64_t s = inputs[i]->compensated_file_size;
  619. total += s;
  620. if (total >= limit) {
  621. covering_the_whole_range = false;
  622. inputs.files.resize(i + 1);
  623. break;
  624. }
  625. }
  626. }
  627. assert(compact_range_options.target_path_id <
  628. static_cast<uint32_t>(ioptions_.cf_paths.size()));
  629. // for BOTTOM LEVEL compaction only, use max_file_num_to_ignore to filter out
  630. // files that are created during the current compaction.
  631. if (compact_range_options.bottommost_level_compaction ==
  632. BottommostLevelCompaction::kForceOptimized &&
  633. max_file_num_to_ignore != port::kMaxUint64) {
  634. assert(input_level == output_level);
  635. // inputs_shrunk holds a contiguous subset of input files which were all
  636. // created before the current manual compaction
  637. std::vector<FileMetaData*> inputs_shrunk;
  638. size_t skip_input_index = inputs.size();
  639. for (size_t i = 0; i < inputs.size(); ++i) {
  640. if (inputs[i]->fd.GetNumber() < max_file_num_to_ignore) {
  641. inputs_shrunk.push_back(inputs[i]);
  642. } else if (!inputs_shrunk.empty()) {
  643. // inputs[i] was created during the current manual compaction and
  644. // needs to be skipped
  645. skip_input_index = i;
  646. break;
  647. }
  648. }
  649. if (inputs_shrunk.empty()) {
  650. return nullptr;
  651. }
  652. if (inputs.size() != inputs_shrunk.size()) {
  653. inputs.files.swap(inputs_shrunk);
  654. }
  655. // set covering_the_whole_range to false if there is any file that needs to
  656. // be compacted in the range of inputs[skip_input_index+1, inputs.size())
  657. for (size_t i = skip_input_index + 1; i < inputs.size(); ++i) {
  658. if (inputs[i]->fd.GetNumber() < max_file_num_to_ignore) {
  659. covering_the_whole_range = false;
  660. }
  661. }
  662. }
  663. InternalKey key_storage;
  664. InternalKey* next_smallest = &key_storage;
  665. if (ExpandInputsToCleanCut(cf_name, vstorage, &inputs, &next_smallest) ==
  666. false) {
  667. // manual compaction is now multi-threaded, so it can
  668. // happen that ExpandInputsToCleanCut fails
  669. // we handle it higher in RunManualCompaction
  670. *manual_conflict = true;
  671. return nullptr;
  672. }
  673. if (covering_the_whole_range || !next_smallest) {
  674. *compaction_end = nullptr;
  675. } else {
  676. **compaction_end = *next_smallest;
  677. }
  678. CompactionInputFiles output_level_inputs;
  679. if (output_level == ColumnFamilyData::kCompactToBaseLevel) {
  680. assert(input_level == 0);
  681. output_level = vstorage->base_level();
  682. assert(output_level > 0);
  683. }
  684. output_level_inputs.level = output_level;
  685. if (input_level != output_level) {
  686. int parent_index = -1;
  687. if (!SetupOtherInputs(cf_name, mutable_cf_options, vstorage, &inputs,
  688. &output_level_inputs, &parent_index, -1)) {
  689. // manual compaction is now multi-threaded, so it can
  690. // happen that SetupOtherInputs fails
  691. // we handle it higher in RunManualCompaction
  692. *manual_conflict = true;
  693. return nullptr;
  694. }
  695. }
  696. std::vector<CompactionInputFiles> compaction_inputs({inputs});
  697. if (!output_level_inputs.empty()) {
  698. compaction_inputs.push_back(output_level_inputs);
  699. }
  700. for (size_t i = 0; i < compaction_inputs.size(); i++) {
  701. if (AreFilesInCompaction(compaction_inputs[i].files)) {
  702. *manual_conflict = true;
  703. return nullptr;
  704. }
  705. }
  706. // 2 non-exclusive manual compactions could run at the same time producing
  707. // overlapping outputs in the same level.
  708. if (FilesRangeOverlapWithCompaction(compaction_inputs, output_level)) {
  709. // This compaction output could potentially conflict with the output
  710. // of a currently running compaction, we cannot run it.
  711. *manual_conflict = true;
  712. return nullptr;
  713. }
  714. std::vector<FileMetaData*> grandparents;
  715. GetGrandparents(vstorage, inputs, output_level_inputs, &grandparents);
  716. Compaction* compaction = new Compaction(
  717. vstorage, ioptions_, mutable_cf_options, std::move(compaction_inputs),
  718. output_level,
  719. MaxFileSizeForLevel(mutable_cf_options, output_level,
  720. ioptions_.compaction_style, vstorage->base_level(),
  721. ioptions_.level_compaction_dynamic_level_bytes),
  722. mutable_cf_options.max_compaction_bytes,
  723. compact_range_options.target_path_id,
  724. GetCompressionType(ioptions_, vstorage, mutable_cf_options, output_level,
  725. vstorage->base_level()),
  726. GetCompressionOptions(ioptions_, vstorage, output_level),
  727. compact_range_options.max_subcompactions, std::move(grandparents),
  728. /* is manual compaction */ true);
  729. TEST_SYNC_POINT_CALLBACK("CompactionPicker::CompactRange:Return", compaction);
  730. RegisterCompaction(compaction);
  731. // Creating a compaction influences the compaction score because the score
  732. // takes running compactions into account (by skipping files that are already
  733. // being compacted). Since we just changed the compaction score, we recalculate
  734. // it here.
  735. vstorage->ComputeCompactionScore(ioptions_, mutable_cf_options);
  736. return compaction;
  737. }
  738. #ifndef ROCKSDB_LITE
  739. namespace {
  740. // Test whether two files have overlapping key-ranges.
  741. bool HaveOverlappingKeyRanges(const Comparator* c, const SstFileMetaData& a,
  742. const SstFileMetaData& b) {
  743. if (c->Compare(a.smallestkey, b.smallestkey) >= 0) {
  744. if (c->Compare(a.smallestkey, b.largestkey) <= 0) {
  745. // b.smallestkey <= a.smallestkey <= b.largestkey
  746. return true;
  747. }
  748. } else if (c->Compare(a.largestkey, b.smallestkey) >= 0) {
  749. // a.smallestkey < b.smallestkey <= a.largestkey
  750. return true;
  751. }
  752. if (c->Compare(a.largestkey, b.largestkey) <= 0) {
  753. if (c->Compare(a.largestkey, b.smallestkey) >= 0) {
  754. // b.smallestkey <= a.largestkey <= b.largestkey
  755. return true;
  756. }
  757. } else if (c->Compare(a.smallestkey, b.largestkey) <= 0) {
  758. // a.smallestkey <= b.largestkey < a.largestkey
  759. return true;
  760. }
  761. return false;
  762. }
  763. } // namespace
  764. Status CompactionPicker::SanitizeCompactionInputFilesForAllLevels(
  765. std::unordered_set<uint64_t>* input_files,
  766. const ColumnFamilyMetaData& cf_meta, const int output_level) const {
  767. auto& levels = cf_meta.levels;
  768. auto comparator = icmp_->user_comparator();
  769. // TODO(yhchiang): add is_adjustable to CompactionOptions
  770. // the smallest and largest key of the current compaction input
  771. std::string smallestkey;
  772. std::string largestkey;
  773. // a flag for initializing smallest and largest key
  774. bool is_first = false;
  775. const int kNotFound = -1;
  776. // For each level, it does the following things:
  777. // 1. Find the first and the last compaction input files
  778. // in the current level.
  779. // 2. Include all files between the first and the last
  780. // compaction input files.
  781. // 3. Update the compaction key-range.
  782. // 4. For all remaining levels, include files that have
  783. // overlapping key-range with the compaction key-range.
  784. for (int l = 0; l <= output_level; ++l) {
  785. auto& current_files = levels[l].files;
  786. int first_included = static_cast<int>(current_files.size());
  787. int last_included = kNotFound;
  788. // identify the first and the last compaction input files
  789. // in the current level.
  790. for (size_t f = 0; f < current_files.size(); ++f) {
  791. if (input_files->find(TableFileNameToNumber(current_files[f].name)) !=
  792. input_files->end()) {
  793. first_included = std::min(first_included, static_cast<int>(f));
  794. last_included = std::max(last_included, static_cast<int>(f));
  795. if (is_first == false) {
  796. smallestkey = current_files[f].smallestkey;
  797. largestkey = current_files[f].largestkey;
  798. is_first = true;
  799. }
  800. }
  801. }
  802. if (last_included == kNotFound) {
  803. continue;
  804. }
  805. if (l != 0) {
  806. // expand the compaction input of the current level if it
  807. // has overlapping key-range with other non-compaction input
  808. // files in the same level.
  809. while (first_included > 0) {
  810. if (comparator->Compare(current_files[first_included - 1].largestkey,
  811. current_files[first_included].smallestkey) <
  812. 0) {
  813. break;
  814. }
  815. first_included--;
  816. }
  817. while (last_included < static_cast<int>(current_files.size()) - 1) {
  818. if (comparator->Compare(current_files[last_included + 1].smallestkey,
  819. current_files[last_included].largestkey) > 0) {
  820. break;
  821. }
  822. last_included++;
  823. }
  824. } else if (output_level > 0) {
  825. last_included = static_cast<int>(current_files.size() - 1);
  826. }
  827. // include all files between the first and the last compaction input files.
  828. for (int f = first_included; f <= last_included; ++f) {
  829. if (current_files[f].being_compacted) {
  830. return Status::Aborted("Necessary compaction input file " +
  831. current_files[f].name +
  832. " is currently being compacted.");
  833. }
  834. input_files->insert(TableFileNameToNumber(current_files[f].name));
  835. }
  836. // update smallest and largest key
  837. if (l == 0) {
  838. for (int f = first_included; f <= last_included; ++f) {
  839. if (comparator->Compare(smallestkey, current_files[f].smallestkey) >
  840. 0) {
  841. smallestkey = current_files[f].smallestkey;
  842. }
  843. if (comparator->Compare(largestkey, current_files[f].largestkey) < 0) {
  844. largestkey = current_files[f].largestkey;
  845. }
  846. }
  847. } else {
  848. if (comparator->Compare(smallestkey,
  849. current_files[first_included].smallestkey) > 0) {
  850. smallestkey = current_files[first_included].smallestkey;
  851. }
  852. if (comparator->Compare(largestkey,
  853. current_files[last_included].largestkey) < 0) {
  854. largestkey = current_files[last_included].largestkey;
  855. }
  856. }
  857. SstFileMetaData aggregated_file_meta;
  858. aggregated_file_meta.smallestkey = smallestkey;
  859. aggregated_file_meta.largestkey = largestkey;
  860. // For all lower levels, include all overlapping files.
  861. // We need to add overlapping files from the current level too because even
  862. // if there are no input_files in level l, we would still need to add files
  863. // which overlap with the range containing the input_files in levels 0 to l
  864. // Level 0 doesn't need to be handled this way because files are sorted by
  865. // time and not by key
  866. for (int m = std::max(l, 1); m <= output_level; ++m) {
  867. for (auto& next_lv_file : levels[m].files) {
  868. if (HaveOverlappingKeyRanges(comparator, aggregated_file_meta,
  869. next_lv_file)) {
  870. if (next_lv_file.being_compacted) {
  871. return Status::Aborted(
  872. "File " + next_lv_file.name +
  873. " that has overlapping key range with one of the compaction "
  874. " input file is currently being compacted.");
  875. }
  876. input_files->insert(TableFileNameToNumber(next_lv_file.name));
  877. }
  878. }
  879. }
  880. }
  881. if (RangeOverlapWithCompaction(smallestkey, largestkey, output_level)) {
  882. return Status::Aborted(
  883. "A running compaction is writing to the same output level in an "
  884. "overlapping key range");
  885. }
  886. return Status::OK();
  887. }
  888. Status CompactionPicker::SanitizeCompactionInputFiles(
  889. std::unordered_set<uint64_t>* input_files,
  890. const ColumnFamilyMetaData& cf_meta, const int output_level) const {
  891. assert(static_cast<int>(cf_meta.levels.size()) - 1 ==
  892. cf_meta.levels[cf_meta.levels.size() - 1].level);
  893. if (output_level >= static_cast<int>(cf_meta.levels.size())) {
  894. return Status::InvalidArgument(
  895. "Output level for column family " + cf_meta.name +
  896. " must between [0, " +
  897. ToString(cf_meta.levels[cf_meta.levels.size() - 1].level) + "].");
  898. }
  899. if (output_level > MaxOutputLevel()) {
  900. return Status::InvalidArgument(
  901. "Exceed the maximum output level defined by "
  902. "the current compaction algorithm --- " +
  903. ToString(MaxOutputLevel()));
  904. }
  905. if (output_level < 0) {
  906. return Status::InvalidArgument("Output level cannot be negative.");
  907. }
  908. if (input_files->size() == 0) {
  909. return Status::InvalidArgument(
  910. "A compaction must contain at least one file.");
  911. }
  912. Status s = SanitizeCompactionInputFilesForAllLevels(input_files, cf_meta,
  913. output_level);
  914. if (!s.ok()) {
  915. return s;
  916. }
  917. // for all input files, check whether the file number matches
  918. // any currently-existing files.
  919. for (auto file_num : *input_files) {
  920. bool found = false;
  921. for (const auto& level_meta : cf_meta.levels) {
  922. for (const auto& file_meta : level_meta.files) {
  923. if (file_num == TableFileNameToNumber(file_meta.name)) {
  924. if (file_meta.being_compacted) {
  925. return Status::Aborted("Specified compaction input file " +
  926. MakeTableFileName("", file_num) +
  927. " is already being compacted.");
  928. }
  929. found = true;
  930. break;
  931. }
  932. }
  933. if (found) {
  934. break;
  935. }
  936. }
  937. if (!found) {
  938. return Status::InvalidArgument(
  939. "Specified compaction input file " + MakeTableFileName("", file_num) +
  940. " does not exist in column family " + cf_meta.name + ".");
  941. }
  942. }
  943. return Status::OK();
  944. }
  945. #endif // !ROCKSDB_LITE
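// Records `c` as a running compaction so that later picks avoid overlapping
// output ranges. L0-rooted and universal compactions are also tracked in
// level0_compactions_in_progress_, since only one compaction involving
// level 0 is allowed at a time.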
  946. void CompactionPicker::RegisterCompaction(Compaction* c) {
  947. if (c == nullptr) {
  948. return;
  949. }
  950. assert(ioptions_.compaction_style != kCompactionStyleLevel ||
  951. c->output_level() == 0 ||
  952. !FilesRangeOverlapWithCompaction(*c->inputs(), c->output_level()));
  953. if (c->start_level() == 0 ||
  954. ioptions_.compaction_style == kCompactionStyleUniversal) {
  955. level0_compactions_in_progress_.insert(c);
  956. }
  957. compactions_in_progress_.insert(c);
  958. }
  959. void CompactionPicker::UnregisterCompaction(Compaction* c) {
  960. if (c == nullptr) {
  961. return;
  962. }
  963. if (c->start_level() == 0 ||
  964. ioptions_.compaction_style == kCompactionStyleUniversal) {
  965. level0_compactions_in_progress_.erase(c);
  966. }
  967. compactions_in_progress_.erase(c);
  968. }
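// Tries to seed a compaction from the files that have been marked for
// compaction elsewhere (vstorage->FilesMarkedForCompaction()). A random
// marked file is tried first; if it cannot be expanded to a clean cut, every
// marked file is tried in order.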
  969. void CompactionPicker::PickFilesMarkedForCompaction(
  970. const std::string& cf_name, VersionStorageInfo* vstorage, int* start_level,
  971. int* output_level, CompactionInputFiles* start_level_inputs) {
  972. if (vstorage->FilesMarkedForCompaction().empty()) {
  973. return;
  974. }
  975. auto continuation = [&, cf_name](std::pair<int, FileMetaData*> level_file) {
  976. // If it's being compacted there is nothing to do here.
  977. // If this assert() fails that means that some function marked some
  978. // files as being_compacted, but didn't call ComputeCompactionScore()
  979. assert(!level_file.second->being_compacted);
  980. *start_level = level_file.first;
  981. *output_level =
  982. (*start_level == 0) ? vstorage->base_level() : *start_level + 1;
  983. if (*start_level == 0 && !level0_compactions_in_progress()->empty()) {
  984. return false;
  985. }
  986. start_level_inputs->files = {level_file.second};
  987. start_level_inputs->level = *start_level;
  988. return ExpandInputsToCleanCut(cf_name, vstorage, start_level_inputs);
  989. };
  990. // take a chance on a random file first
  991. Random64 rnd(/* seed */ reinterpret_cast<uint64_t>(vstorage));
  992. size_t random_file_index = static_cast<size_t>(rnd.Uniform(
  993. static_cast<uint64_t>(vstorage->FilesMarkedForCompaction().size())));
  994. if (continuation(vstorage->FilesMarkedForCompaction()[random_file_index])) {
  995. // found the compaction!
  996. return;
  997. }
  998. for (auto& level_file : vstorage->FilesMarkedForCompaction()) {
  999. if (continuation(level_file)) {
  1000. // found the compaction!
  1001. return;
  1002. }
  1003. }
  1004. start_level_inputs->files.clear();
  1005. }
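// Expands a level-0 pick to include every L0 file overlapping its key range,
// recomputes the range since the added files may widen it, and then checks
// that the resulting range does not collide with a compaction already running
// on the output level.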
  1006. bool CompactionPicker::GetOverlappingL0Files(
  1007. VersionStorageInfo* vstorage, CompactionInputFiles* start_level_inputs,
  1008. int output_level, int* parent_index) {
  1009. // Two level 0 compactions won't run at the same time, so we don't need to
  1010. // worry about files on level 0 being compacted.
  1011. assert(level0_compactions_in_progress()->empty());
  1012. InternalKey smallest, largest;
  1013. GetRange(*start_level_inputs, &smallest, &largest);
  1014. // Note that the next call will discard the file we placed in
  1015. // start_level_inputs earlier and replace it with an overlapping set
  1016. // which will include the picked file.
  1017. start_level_inputs->files.clear();
  1018. vstorage->GetOverlappingInputs(0, &smallest, &largest,
  1019. &(start_level_inputs->files));
  1020. // If we include more L0 files in the same compaction run it can
  1021. // cause the 'smallest' and 'largest' key to get extended to a
  1022. // larger range. So, re-invoke GetRange to get the new key range
  1023. GetRange(*start_level_inputs, &smallest, &largest);
  1024. if (IsRangeInCompaction(vstorage, &smallest, &largest, output_level,
  1025. parent_index)) {
  1026. return false;
  1027. }
  1028. assert(!start_level_inputs->files.empty());
  1029. return true;
  1030. }
  1031. } // namespace ROCKSDB_NAMESPACE