db_universal_compaction_test.cc 87 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429
  1. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
  2. // This source code is licensed under both the GPLv2 (found in the
  3. // COPYING file in the root directory) and Apache 2.0 License
  4. // (found in the LICENSE.Apache file in the root directory).
  5. //
  6. // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
  7. // Use of this source code is governed by a BSD-style license that can be
  8. // found in the LICENSE file. See the AUTHORS file for names of contributors.
  9. #include <string>
  10. #include "db/db_test_util.h"
  11. #include "port/stack_trace.h"
  12. #include "rocksdb/utilities/table_properties_collectors.h"
  13. #include "test_util/mock_time_env.h"
  14. #include "test_util/sync_point.h"
  15. #include "test_util/testutil.h"
  16. #include "util/random.h"
  17. namespace ROCKSDB_NAMESPACE {
  18. static std::string CompressibleString(Random* rnd, int len) {
  19. std::string r;
  20. test::CompressibleString(rnd, 0.8, len, &r);
  21. return r;
  22. }
// Shared fixture for universal-compaction tests parameterized over a
// (num_levels, exclusive_manual_compaction) tuple. Subclasses only choose
// the on-disk test path.
class DBTestUniversalCompactionBase
    : public DBTestBase,
      public ::testing::WithParamInterface<std::tuple<int, bool>> {
 public:
  explicit DBTestUniversalCompactionBase(const std::string& path)
      : DBTestBase(path, /*env_do_fsync=*/false) {}
  void SetUp() override {
    // Unpack the test parameters once so individual tests can read them as
    // plain members.
    num_levels_ = std::get<0>(GetParam());
    exclusive_manual_compaction_ = std::get<1>(GetParam());
  }
  int num_levels_;                    // number of LSM levels to configure
  bool exclusive_manual_compaction_;  // test parameter consumed by tests
};
// Parameterized universal-compaction fixture rooted at its own test path.
class DBTestUniversalCompaction : public DBTestUniversalCompactionBase {
 public:
  DBTestUniversalCompaction()
      : DBTestUniversalCompactionBase("/db_universal_compaction_test") {}
};
// Non-parameterized fixture for universal-compaction tests that do not need
// the (num_levels, exclusive_manual_compaction) parameter matrix.
class DBTestUniversalCompaction2 : public DBTestBase {
 public:
  DBTestUniversalCompaction2()
      : DBTestBase("db_universal_compaction_test2", /*env_do_fsync=*/false) {}
};
  46. namespace {
  47. void VerifyCompactionResult(
  48. const ColumnFamilyMetaData& cf_meta,
  49. const std::set<std::string>& overlapping_file_numbers) {
  50. #ifndef NDEBUG
  51. for (auto& level : cf_meta.levels) {
  52. for (auto& file : level.files) {
  53. assert(overlapping_file_numbers.find(file.name) ==
  54. overlapping_file_numbers.end());
  55. }
  56. }
  57. #endif
  58. }
// A compaction filter that keeps every key/value pair: Filter() always
// returns false, meaning "do not drop this entry".
class KeepFilter : public CompactionFilter {
 public:
  bool Filter(int /*level*/, const Slice& /*key*/, const Slice& /*value*/,
              std::string* /*new_value*/,
              bool* /*value_changed*/) const override {
    return false;
  }

  const char* Name() const override { return "KeepFilter"; }
};
  68. class KeepFilterFactory : public CompactionFilterFactory {
  69. public:
  70. explicit KeepFilterFactory(bool check_context = false)
  71. : check_context_(check_context) {}
  72. std::unique_ptr<CompactionFilter> CreateCompactionFilter(
  73. const CompactionFilter::Context& context) override {
  74. if (check_context_) {
  75. EXPECT_EQ(expect_full_compaction_.load(), context.is_full_compaction);
  76. EXPECT_EQ(expect_manual_compaction_.load(), context.is_manual_compaction);
  77. }
  78. return std::unique_ptr<CompactionFilter>(new KeepFilter());
  79. }
  80. const char* Name() const override { return "KeepFilterFactory"; }
  81. bool check_context_;
  82. std::atomic_bool expect_full_compaction_;
  83. std::atomic_bool expect_manual_compaction_;
  84. };
  85. } // anonymous namespace
// Make sure we don't trigger a problem if the trigger condition is given
// to be 0, which is invalid (it should be sanitized to 1, and the DB should
// keep compacting everything down to a single sorted run).
TEST_P(DBTestUniversalCompaction, UniversalCompactionSingleSortedRun) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.num_levels = num_levels_;
  // Config universal compaction to always compact to one single sorted run.
  options.level0_file_num_compaction_trigger = 0;
  options.compaction_options_universal.size_ratio = 10;
  options.compaction_options_universal.min_merge_width = 2;
  options.compaction_options_universal.max_size_amplification_percent = 0;
  options.write_buffer_size = 105 << 10;     // 105KB
  options.arena_block_size = 4 << 10;
  options.target_file_size_base = 32 << 10;  // 32KB
  // The filter factory (with check_context = true) verifies the compaction
  // context flags set via the expect_* atomics below.
  KeepFilterFactory* filter = new KeepFilterFactory(true);
  filter->expect_manual_compaction_.store(false);
  options.compaction_filter_factory.reset(filter);
  DestroyAndReopen(options);
  // The invalid trigger of 0 must have been sanitized up to 1.
  ASSERT_EQ(1, db_->GetOptions().level0_file_num_compaction_trigger);
  Random rnd(301);
  int key_idx = 0;
  filter->expect_full_compaction_.store(true);
  for (int num = 0; num < 16; num++) {
    // Write 100KB file. And immediately it should be compacted to one file.
    GenerateNewFile(&rnd, &key_idx);
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
    ASSERT_EQ(NumSortedRuns(0), 1);
  }
  // One final flush-triggering write must also collapse back to one run.
  ASSERT_OK(Put(Key(key_idx), ""));
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ(NumSortedRuns(0), 1);
}
// With optimize_filters_for_hits enabled under universal compaction, bloom
// filters should be consulted for all but the last (oldest) L0 file on a
// point lookup, and not at all once the data is compacted into one run.
TEST_P(DBTestUniversalCompaction, OptimizeFiltersForHits) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.compaction_options_universal.size_ratio = 5;
  options.num_levels = num_levels_;
  options.write_buffer_size = 105 << 10;     // 105KB
  options.arena_block_size = 4 << 10;
  options.target_file_size_base = 32 << 10;  // 32KB
  // trigger compaction if there are >= 4 files
  options.level0_file_num_compaction_trigger = 4;
  BlockBasedTableOptions bbto;
  bbto.cache_index_and_filter_blocks = true;
  bbto.filter_policy.reset(NewBloomFilterPolicy(10, false));
  bbto.whole_key_filtering = true;
  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
  options.optimize_filters_for_hits = true;
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
  // Flush the memtable after every 3 entries.
  options.memtable_factory.reset(test::NewSpecialSkipListFactory(3));
  DestroyAndReopen(options);
  // block compaction from happening
  env_->SetBackgroundThreads(1, Env::LOW);
  test::SleepingBackgroundTask sleeping_task_low;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
  // Build level0_file_num_compaction_trigger L0 files, each spanning the
  // whole key range (keys ~0..90) so every file overlaps every lookup.
  for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) {
    ASSERT_OK(Put(Key(num * 10), "val"));
    if (num) {
      ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
    }
    ASSERT_OK(Put(Key(30 + num * 10), "val"));
    ASSERT_OK(Put(Key(60 + num * 10), "val"));
  }
  ASSERT_OK(Put("", ""));
  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
  // Query set of non existing keys
  for (int i = 5; i < 90; i += 10) {
    ASSERT_EQ(Get(Key(i)), "NOT_FOUND");
  }
  // Make sure bloom filter is used at least once.
  ASSERT_GT(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0);
  auto prev_counter = TestGetTickerCount(options, BLOOM_FILTER_USEFUL);
  // Make sure bloom filter is used for all but the last L0 file when looking
  // up a non-existent key that's in the range of all L0 files.
  ASSERT_EQ(Get(Key(35)), "NOT_FOUND");
  ASSERT_EQ(prev_counter + NumTableFilesAtLevel(0) - 1,
            TestGetTickerCount(options, BLOOM_FILTER_USEFUL));
  prev_counter = TestGetTickerCount(options, BLOOM_FILTER_USEFUL);
  // Unblock compaction and wait it for happening.
  sleeping_task_low.WakeUp();
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  // The same queries will not trigger bloom filter
  for (int i = 5; i < 90; i += 10) {
    ASSERT_EQ(Get(Key(i)), "NOT_FOUND");
  }
  ASSERT_EQ(prev_counter, TestGetTickerCount(options, BLOOM_FILTER_USEFUL));
}
// TODO(kailiu) The tests on UniversalCompaction has some issues:
//   1. A lot of magic numbers ("11" or "12").
//   2. Made assumption on the memtable flush conditions, which may change
//      from time to time.
//
// Walks universal compaction through five stages of L0 file accumulation and
// verifies when size-ratio compaction fires and how many sorted runs remain.
TEST_P(DBTestUniversalCompaction, UniversalCompactionTrigger) {
  Options options;
  options.compaction_style = kCompactionStyleUniversal;
  options.compaction_options_universal.size_ratio = 5;
  options.num_levels = num_levels_;
  options.write_buffer_size = 105 << 10;  // 105KB
  options.arena_block_size = 4 << 10;
  // trigger compaction if there are >= 4 files
  options.level0_file_num_compaction_trigger = 4;
  // Verify the compaction-context flags passed to the filter factory.
  KeepFilterFactory* filter = new KeepFilterFactory(true);
  filter->expect_manual_compaction_.store(false);
  options.compaction_filter_factory.reset(filter);
  options = CurrentOptions(options);
  DestroyAndReopen(options);
  CreateAndReopenWithCF({"pikachu"}, options);
  // When there are > 3 levels, output files should be preallocated close to
  // target_file_size_base (with a 10% slack), not to some larger size.
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "DBTestWritableFile.GetPreallocationStatus", [&](void* arg) {
        ASSERT_TRUE(arg != nullptr);
        size_t preallocation_size = *(static_cast<size_t*>(arg));
        if (num_levels_ > 3) {
          ASSERT_LE(preallocation_size, options.target_file_size_base * 1.1);
        }
      });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
  Random rnd(301);
  int key_idx = 0;
  filter->expect_full_compaction_.store(true);
  // Stage 1:
  //   Generate a set of files at level 0, but don't trigger level-0
  //   compaction.
  for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
       num++) {
    // Write 100KB
    GenerateNewFile(1, &rnd, &key_idx);
  }
  // Generate one more file at level-0, which should trigger level-0
  // compaction.
  GenerateNewFile(1, &rnd, &key_idx);
  // Suppose each file flushed from mem table has size 1. Now we compact
  // (level0_file_num_compaction_trigger+1)=4 files and should have a big
  // file of size 4.
  ASSERT_EQ(NumSortedRuns(1), 1);
  // Stage 2:
  //   Now we have one file at level 0, with size 4. We also have some data in
  //   mem table. Let's continue generating new files at level 0, but don't
  //   trigger level-0 compaction.
  //   First, clean up memtable before inserting new data. This will generate
  //   a level-0 file, with size around 0.4 (according to previously written
  //   data amount).
  filter->expect_full_compaction_.store(false);
  ASSERT_OK(Flush(1));
  for (int num = 0; num < options.level0_file_num_compaction_trigger - 3;
       num++) {
    GenerateNewFile(1, &rnd, &key_idx);
    ASSERT_EQ(NumSortedRuns(1), num + 3);
  }
  // Generate one more file at level-0, which should trigger level-0
  // compaction.
  GenerateNewFile(1, &rnd, &key_idx);
  // Before compaction, we have 4 files at level 0, with size 4, 0.4, 1, 1.
  // After compaction, we should have 2 files, with size 4, 2.4.
  ASSERT_EQ(NumSortedRuns(1), 2);
  // Stage 3:
  //   Now we have 2 files at level 0, with size 4 and 2.4. Continue
  //   generating new files at level 0.
  for (int num = 0; num < options.level0_file_num_compaction_trigger - 3;
       num++) {
    GenerateNewFile(1, &rnd, &key_idx);
    ASSERT_EQ(NumSortedRuns(1), num + 3);
  }
  // Generate one more file at level-0, which should trigger level-0
  // compaction.
  GenerateNewFile(1, &rnd, &key_idx);
  // Before compaction, we have 4 files at level 0, with size 4, 2.4, 1, 1.
  // After compaction, we should have 3 files, with size 4, 2.4, 2.
  ASSERT_EQ(NumSortedRuns(1), 3);
  // Stage 4:
  //   Now we have 3 files at level 0, with size 4, 2.4, 2. Let's generate a
  //   new file of size 1.
  GenerateNewFile(1, &rnd, &key_idx);
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  // Level-0 compaction is triggered, but no file will be picked up.
  ASSERT_EQ(NumSortedRuns(1), 4);
  // Stage 5:
  //   Now we have 4 files at level 0, with size 4, 2.4, 2, 1. Let's generate
  //   a new file of size 1.
  filter->expect_full_compaction_.store(true);
  GenerateNewFile(1, &rnd, &key_idx);
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  // All files at level 0 will be compacted into a single one.
  ASSERT_EQ(NumSortedRuns(1), 1);
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
// A small trailing flush should not satisfy the size-ratio condition but
// should push size amplification over the configured 110% limit, collapsing
// everything into a single sorted run.
TEST_P(DBTestUniversalCompaction, UniversalCompactionSizeAmplification) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.num_levels = num_levels_;
  options.write_buffer_size = 100 << 10;     // 100KB
  options.target_file_size_base = 32 << 10;  // 32KB
  options.level0_file_num_compaction_trigger = 3;
  DestroyAndReopen(options);
  CreateAndReopenWithCF({"pikachu"}, options);
  // Trigger compaction if size amplification exceeds 110%
  options.compaction_options_universal.max_size_amplification_percent = 110;
  options = CurrentOptions(options);
  ReopenWithColumnFamilies({"default", "pikachu"}, options);
  Random rnd(301);
  int key_idx = 0;
  // Generate two files in Level 0. Both files are approx the same size.
  for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
       num++) {
    // Write 110KB (11 values, each 10K)
    for (int i = 0; i < 11; i++) {
      ASSERT_OK(Put(1, Key(key_idx), rnd.RandomString(10000)));
      key_idx++;
    }
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[1]));
    ASSERT_EQ(NumSortedRuns(1), num + 1);
  }
  ASSERT_EQ(NumSortedRuns(1), 2);
  // Flush whatever is remaining in memtable. This is typically
  // small, which should not trigger size ratio based compaction
  // but will instead trigger size amplification.
  ASSERT_OK(Flush(1));
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  // Verify that size amplification did occur
  ASSERT_EQ(NumSortedRuns(1), 1);
}
// Verifies that max_size_amplification_percent can be lowered via SetOptions
// at runtime (no DB reopen) and that the resulting compaction is attributed
// to kUniversalSizeAmplification.
TEST_P(DBTestUniversalCompaction, DynamicUniversalCompactionSizeAmplification) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.num_levels = 1;
  options.write_buffer_size = 100 << 10;     // 100KB
  options.target_file_size_base = 32 << 10;  // 32KB
  options.level0_file_num_compaction_trigger = 3;
  // Initial setup of compaction_options_universal will prevent universal
  // compaction from happening
  options.compaction_options_universal.size_ratio = 100;
  options.compaction_options_universal.min_merge_width = 100;
  DestroyAndReopen(options);
  int total_picked_compactions = 0;
  int total_size_amp_compactions = 0;
  // Count every picked compaction, and separately those attributed to size
  // amplification.
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "UniversalCompactionBuilder::PickCompaction:Return", [&](void* arg) {
        if (arg) {
          total_picked_compactions++;
          Compaction* c = static_cast<Compaction*>(arg);
          if (c->compaction_reason() ==
              CompactionReason::kUniversalSizeAmplification) {
            total_size_amp_compactions++;
          }
        }
      });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
  MutableCFOptions mutable_cf_options;
  CreateAndReopenWithCF({"pikachu"}, options);
  Random rnd(301);
  int key_idx = 0;
  // Generate two files in Level 0. Both files are approx the same size.
  for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
       num++) {
    // Write 110KB (11 values, each 10K)
    for (int i = 0; i < 11; i++) {
      ASSERT_OK(Put(1, Key(key_idx), rnd.RandomString(10000)));
      key_idx++;
    }
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[1]));
    ASSERT_EQ(NumSortedRuns(1), num + 1);
  }
  ASSERT_EQ(NumSortedRuns(1), 2);
  // Flush whatever is remaining in memtable. This is typically
  // small, which should not trigger size ratio based compaction
  // but could instead trigger size amplification if it's set
  // to 110.
  ASSERT_OK(Flush(1));
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  // Verify compaction did not happen
  ASSERT_EQ(NumSortedRuns(1), 3);
  // Trigger compaction if size amplification exceeds 110% without reopening DB
  ASSERT_EQ(dbfull()
                ->GetOptions(handles_[1])
                .compaction_options_universal.max_size_amplification_percent,
            200U);
  ASSERT_OK(dbfull()->SetOptions(handles_[1],
                                 {{"compaction_options_universal",
                                   "{max_size_amplification_percent=110;}"}}));
  ASSERT_EQ(dbfull()
                ->GetOptions(handles_[1])
                .compaction_options_universal.max_size_amplification_percent,
            110u);
  // The dynamically-set value must also be visible in the latest
  // MutableCFOptions.
  ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
                                                     &mutable_cf_options));
  ASSERT_EQ(110u, mutable_cf_options.compaction_options_universal
                      .max_size_amplification_percent);
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  // Verify that size amplification did happen
  ASSERT_EQ(NumSortedRuns(1), 1);
  ASSERT_EQ(total_picked_compactions, 1);
  ASSERT_EQ(total_size_amp_compactions, 1);
}
// Verifies that size_ratio / min_merge_width / max_merge_width can be changed
// via SetOptions at runtime, and that the resulting compactions are
// attributed to kUniversalSizeRatio.
TEST_P(DBTestUniversalCompaction, DynamicUniversalCompactionReadAmplification) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.num_levels = 1;
  options.write_buffer_size = 100 << 10;     // 100KB
  options.target_file_size_base = 32 << 10;  // 32KB
  options.level0_file_num_compaction_trigger = 3;
  // Initial setup of compaction_options_universal will prevent universal
  // compaction from happening
  options.compaction_options_universal.max_size_amplification_percent = 2000;
  options.compaction_options_universal.size_ratio = 0;
  options.compaction_options_universal.min_merge_width = 100;
  DestroyAndReopen(options);
  int total_picked_compactions = 0;
  int total_size_ratio_compactions = 0;
  // Count every picked compaction, and separately those attributed to the
  // size-ratio (read amplification) condition.
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "UniversalCompactionBuilder::PickCompaction:Return", [&](void* arg) {
        if (arg) {
          total_picked_compactions++;
          Compaction* c = static_cast<Compaction*>(arg);
          if (c->compaction_reason() == CompactionReason::kUniversalSizeRatio) {
            total_size_ratio_compactions++;
          }
        }
      });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
  MutableCFOptions mutable_cf_options;
  CreateAndReopenWithCF({"pikachu"}, options);
  Random rnd(301);
  int key_idx = 0;
  // Generate three files in Level 0. All files are approx the same size.
  for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) {
    // Write 110KB (11 values, each 10K)
    for (int i = 0; i < 11; i++) {
      ASSERT_OK(Put(1, Key(key_idx), rnd.RandomString(10000)));
      key_idx++;
    }
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[1]));
    ASSERT_EQ(NumSortedRuns(1), num + 1);
  }
  ASSERT_EQ(NumSortedRuns(1), options.level0_file_num_compaction_trigger);
  // Flush whatever is remaining in memtable. This is typically small, about
  // 30KB.
  ASSERT_OK(Flush(1));
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  // Verify compaction did not happen
  ASSERT_EQ(NumSortedRuns(1), options.level0_file_num_compaction_trigger + 1);
  ASSERT_EQ(total_picked_compactions, 0);
  // Now enable size-ratio compaction dynamically.
  ASSERT_OK(dbfull()->SetOptions(
      handles_[1],
      {{"compaction_options_universal",
        "{min_merge_width=2;max_merge_width=2;size_ratio=100;}"}}));
  ASSERT_EQ(dbfull()
                ->GetOptions(handles_[1])
                .compaction_options_universal.min_merge_width,
            2u);
  ASSERT_EQ(dbfull()
                ->GetOptions(handles_[1])
                .compaction_options_universal.max_merge_width,
            2u);
  ASSERT_EQ(
      dbfull()->GetOptions(handles_[1]).compaction_options_universal.size_ratio,
      100u);
  // The dynamically-set values must also be visible in the latest
  // MutableCFOptions.
  ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
                                                     &mutable_cf_options));
  ASSERT_EQ(mutable_cf_options.compaction_options_universal.size_ratio, 100u);
  ASSERT_EQ(mutable_cf_options.compaction_options_universal.min_merge_width,
            2u);
  ASSERT_EQ(mutable_cf_options.compaction_options_universal.max_merge_width,
            2u);
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  // Files in L0 are approx: 0.3 (30KB), 1, 1, 1.
  // On compaction: the files are below the size amp threshold, so we
  // fallthrough to checking read amp conditions. The configured size ratio is
  // not big enough to take 0.3 into consideration. So the next files 1 and 1
  // are compacted together first as they satisfy size ratio condition and
  // (min_merge_width, max_merge_width) condition, to give out a file size of 2.
  // Next, the newly generated 2 and the last file 1 are compacted together. So
  // at the end: #sortedRuns = 2, #picked_compactions = 2, and all the picked
  // ones are size ratio based compactions.
  ASSERT_EQ(NumSortedRuns(1), 2);
  // If max_merge_width had not been changed dynamically above, and if it
  // continued to be the default value of UINIT_MAX, total_picked_compactions
  // would have been 1.
  ASSERT_EQ(total_picked_compactions, 2);
  ASSERT_EQ(total_size_ratio_compactions, 2);
}
// Exercises the CompactFiles() API under universal compaction: output must go
// to L0 (any other output level fails), and compacted input files must
// disappear from the column-family metadata.
TEST_P(DBTestUniversalCompaction, CompactFilesOnUniversalCompaction) {
  const int kTestKeySize = 16;
  const int kTestValueSize = 984;
  const int kEntrySize = kTestKeySize + kTestValueSize;
  const int kEntriesPerBuffer = 10;
  ChangeCompactOptions();
  Options options;
  options.create_if_missing = true;
  options.compaction_style = kCompactionStyleLevel;
  options.num_levels = 1;
  options.target_file_size_base = options.write_buffer_size;
  options.compression = kNoCompression;
  options = CurrentOptions(options);
  options.write_buffer_size = kEntrySize * kEntriesPerBuffer;
  CreateAndReopenWithCF({"pikachu"}, options);
  // CurrentOptions() (with ChangeCompactOptions() in effect) overrides the
  // style set above to universal — assert that's what we're running with.
  ASSERT_EQ(options.compaction_style, kCompactionStyleUniversal);
  Random rnd(301);
  for (int key = 1024 * kEntriesPerBuffer; key >= 0; --key) {
    ASSERT_OK(Put(1, std::to_string(key), rnd.RandomString(kTestValueSize)));
  }
  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[1]));
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ColumnFamilyMetaData cf_meta;
  dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta);
  // Pick a random subset of L0 files as compaction input (at least one).
  std::vector<std::string> compaction_input_file_names;
  for (const auto& file : cf_meta.levels[0].files) {
    if (rnd.OneIn(2)) {
      compaction_input_file_names.push_back(file.name);
    }
  }
  if (compaction_input_file_names.size() == 0) {
    compaction_input_file_names.push_back(cf_meta.levels[0].files[0].name);
  }
  // expect fail since universal compaction only allow L0 output
  ASSERT_FALSE(dbfull()
                   ->CompactFiles(CompactionOptions(), handles_[1],
                                  compaction_input_file_names, 1)
                   .ok());
  // expect ok and verify the compacted files no longer exist.
  ASSERT_OK(dbfull()->CompactFiles(CompactionOptions(), handles_[1],
                                   compaction_input_file_names, 0));
  dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta);
  VerifyCompactionResult(
      cf_meta, std::set<std::string>(compaction_input_file_names.begin(),
                                     compaction_input_file_names.end()));
  compaction_input_file_names.clear();
  // Pick the first and the last file, expect everything is
  // compacted into one single file.
  compaction_input_file_names.push_back(cf_meta.levels[0].files[0].name);
  compaction_input_file_names.push_back(
      cf_meta.levels[0].files[cf_meta.levels[0].files.size() - 1].name);
  ASSERT_OK(dbfull()->CompactFiles(CompactionOptions(), handles_[1],
                                   compaction_input_file_names, 0));
  dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta);
  ASSERT_EQ(cf_meta.levels[0].files.size(), 1U);
}
  522. TEST_P(DBTestUniversalCompaction, UniversalCompactionTargetLevel) {
  523. Options options = CurrentOptions();
  524. options.compaction_style = kCompactionStyleUniversal;
  525. options.write_buffer_size = 100 << 10; // 100KB
  526. options.num_levels = 7;
  527. options.disable_auto_compactions = true;
  528. DestroyAndReopen(options);
  529. // Generate 3 overlapping files
  530. Random rnd(301);
  531. for (int i = 0; i < 210; i++) {
  532. ASSERT_OK(Put(Key(i), rnd.RandomString(100)));
  533. }
  534. ASSERT_OK(Flush());
  535. for (int i = 200; i < 300; i++) {
  536. ASSERT_OK(Put(Key(i), rnd.RandomString(100)));
  537. }
  538. ASSERT_OK(Flush());
  539. for (int i = 250; i < 260; i++) {
  540. ASSERT_OK(Put(Key(i), rnd.RandomString(100)));
  541. }
  542. ASSERT_OK(Flush());
  543. ASSERT_EQ("3", FilesPerLevel(0));
  544. // Compact all files into 1 file and put it in L4
  545. CompactRangeOptions compact_options;
  546. compact_options.change_level = true;
  547. compact_options.target_level = 4;
  548. compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
  549. ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr));
  550. ASSERT_EQ("0,0,0,0,1", FilesPerLevel(0));
  551. }
  552. #if !defined(ROCKSDB_VALGRIND_RUN) || defined(ROCKSDB_FULL_VALGRIND_RUN)
// Fixture for universal-compaction tests that are instantiated with more
// than one level (num_levels_ = 3 and 20 via the INSTANTIATE below). The
// string passed to the base class is the per-suite test directory suffix.
class DBTestUniversalCompactionMultiLevels
    : public DBTestUniversalCompactionBase {
 public:
  DBTestUniversalCompactionMultiLevels()
      : DBTestUniversalCompactionBase(
            "/db_universal_compaction_multi_levels_test") {}
};
  560. TEST_P(DBTestUniversalCompactionMultiLevels, UniversalCompactionMultiLevels) {
  561. Options options = CurrentOptions();
  562. options.compaction_style = kCompactionStyleUniversal;
  563. options.num_levels = num_levels_;
  564. options.write_buffer_size = 100 << 10; // 100KB
  565. options.level0_file_num_compaction_trigger = 8;
  566. options.max_background_compactions = 3;
  567. options.target_file_size_base = 32 * 1024;
  568. CreateAndReopenWithCF({"pikachu"}, options);
  569. // Trigger compaction if size amplification exceeds 110%
  570. options.compaction_options_universal.max_size_amplification_percent = 110;
  571. options = CurrentOptions(options);
  572. ReopenWithColumnFamilies({"default", "pikachu"}, options);
  573. Random rnd(301);
  574. int num_keys = 100000;
  575. for (int i = 0; i < num_keys * 2; i++) {
  576. ASSERT_OK(Put(1, Key(i % num_keys), Key(i)));
  577. }
  578. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  579. for (int i = num_keys; i < num_keys * 2; i++) {
  580. ASSERT_EQ(Get(1, Key(i % num_keys)), Key(i));
  581. }
  582. }
// Tests universal compaction with trivial move enabled
TEST_P(DBTestUniversalCompactionMultiLevels, UniversalCompactionTrivialMove) {
  int32_t trivial_move = 0;
  int32_t non_trivial_move = 0;
  // Count trivial vs. non-trivial compactions via sync-point callbacks.
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::BackgroundCompaction:TrivialMove",
      [&](void* /*arg*/) { trivial_move++; });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::BackgroundCompaction:NonTrivial", [&](void* arg) {
        non_trivial_move++;
        ASSERT_TRUE(arg != nullptr);
        // The sync point passes the output level; under universal
        // compaction it must be 0 here.
        int output_level = *(static_cast<int*>(arg));
        ASSERT_EQ(output_level, 0);
      });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();

  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.compaction_options_universal.allow_trivial_move = true;
  options.num_levels = 3;
  options.write_buffer_size = 100 << 10;  // 100KB
  options.level0_file_num_compaction_trigger = 3;
  options.max_background_compactions = 2;
  options.target_file_size_base = 32 * 1024;
  DestroyAndReopen(options);
  CreateAndReopenWithCF({"pikachu"}, options);

  // Trigger compaction if size amplification exceeds 110%
  options.compaction_options_universal.max_size_amplification_percent = 110;
  options = CurrentOptions(options);
  ReopenWithColumnFamilies({"default", "pikachu"}, options);

  Random rnd(301);  // NOTE(review): unused; values are Key(i), not random.
  int num_keys = 150000;
  for (int i = 0; i < num_keys; i++) {
    ASSERT_OK(Put(1, Key(i), Key(i)));
  }
  std::vector<std::string> values;  // NOTE(review): unused, kept as-is.

  ASSERT_OK(Flush(1));
  ASSERT_OK(dbfull()->TEST_WaitForCompact());

  // Both kinds of compactions must have happened at least once.
  ASSERT_GT(trivial_move, 0);
  ASSERT_GT(non_trivial_move, 0);

  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
// Instantiate the multi-level suite with num_levels_ in {3, 20}; the bool
// parameter is the fixture's second base-class flag (presumably
// exclusive_manual_compaction_ -- confirm in DBTestUniversalCompactionBase).
INSTANTIATE_TEST_CASE_P(MultiLevels, DBTestUniversalCompactionMultiLevels,
                        ::testing::Combine(::testing::Values(3, 20),
                                           ::testing::Bool()));
// Fixture for tests that exercise parallel universal compactions.
// NOTE(review): "prallel" in the directory suffix below is a long-standing
// typo; it is a runtime path string, so it is intentionally left unchanged.
class DBTestUniversalCompactionParallel : public DBTestUniversalCompactionBase {
 public:
  DBTestUniversalCompactionParallel()
      : DBTestUniversalCompactionBase("/db_universal_compaction_prallel_test") {
  }
};
// Verifies that universal compactions can run in parallel: the first
// compaction stalls inside CompactionJob::Run() until a second one starts.
TEST_P(DBTestUniversalCompactionParallel, UniversalCompactionParallel) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.num_levels = num_levels_;
  options.env = env_;
  options.write_buffer_size = 1 << 10;  // 1KB
  options.level0_file_num_compaction_trigger = 3;
  options.max_background_compactions = 3;
  options.max_background_flushes = 3;
  options.target_file_size_base = 1 * 1024;
  options.compaction_options_universal.max_size_amplification_percent = 110;
  DestroyAndReopen(options);
  CreateAndReopenWithCF({"pikachu"}, options);

  // Delay every compaction so multiple compactions will happen.
  std::atomic<int> num_compactions_running(0);
  std::atomic<bool> has_parallel(false);
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "CompactionJob::Run():Start", [&](void* /*arg*/) {
        // Any compaction other than the first records that parallelism
        // was observed and proceeds immediately.
        if (num_compactions_running.fetch_add(1) > 0) {
          has_parallel.store(true);
          return;
        }
        // The first compaction spins (up to 20000 * 1ms) waiting for a
        // second compaction to start.
        for (int nwait = 0; nwait < 20000; nwait++) {
          if (has_parallel.load() || num_compactions_running.load() > 1) {
            has_parallel.store(true);
            break;
          }
          env_->SleepForMicroseconds(1000);
        }
      });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "CompactionJob::Run():End",
      [&](void* /*arg*/) { num_compactions_running.fetch_add(-1); });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();

  options = CurrentOptions(options);
  ReopenWithColumnFamilies({"default", "pikachu"}, options);

  Random rnd(301);  // NOTE(review): unused; values are Key(i), not random.
  int num_keys = 30000;
  // Write every key twice; second pass overwrites the first.
  for (int i = 0; i < num_keys * 2; i++) {
    ASSERT_OK(Put(1, Key(i % num_keys), Key(i)));
  }
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();

  // All compactions must have finished, and at least two overlapped.
  ASSERT_EQ(num_compactions_running.load(), 0);
  ASSERT_TRUE(has_parallel.load());

  for (int i = num_keys; i < num_keys * 2; i++) {
    ASSERT_EQ(Get(1, Key(i % num_keys)), Key(i));
  }

  // Reopen and check.
  ReopenWithColumnFamilies({"default", "pikachu"}, options);
  for (int i = num_keys; i < num_keys * 2; i++) {
    ASSERT_EQ(Get(1, Key(i % num_keys)), Key(i));
  }
}
// Regression test: while one compaction is held at CompactionJob::Run(),
// newly flushed files must not be re-picked purely by file number. The
// dependency graph below gates each phase of the test on the compaction
// picker and the compaction job.
TEST_P(DBTestUniversalCompactionParallel, PickByFileNumberBug) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.num_levels = num_levels_;
  options.write_buffer_size = 1 * 1024;  // 1KB
  options.level0_file_num_compaction_trigger = 7;
  options.max_background_compactions = 2;
  options.target_file_size_base = 1024 * 1024;  // 1MB

  // Disable size amplification compaction
  options.compaction_options_universal.max_size_amplification_percent =
      UINT_MAX;
  DestroyAndReopen(options);

  // :0 releases the background compaction, :1 waits for a compaction to be
  // picked, :2 releases the held compaction job.
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
      {{"DBTestUniversalCompactionParallel::PickByFileNumberBug:0",
        "BackgroundCallCompaction:0"},
       {"UniversalCompactionBuilder::PickCompaction:Return",
        "DBTestUniversalCompactionParallel::PickByFileNumberBug:1"},
       {"DBTestUniversalCompactionParallel::PickByFileNumberBug:2",
        "CompactionJob::Run():Start"}});

  int total_picked_compactions = 0;
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "UniversalCompactionBuilder::PickCompaction:Return", [&](void* arg) {
        // arg is non-null only when a compaction was actually picked.
        if (arg) {
          total_picked_compactions++;
        }
      });

  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();

  // Write 7 files to trigger compaction
  int key_idx = 1;
  for (int i = 1; i <= 70; i++) {
    std::string k = Key(key_idx++);
    ASSERT_OK(Put(k, k));
    if (i % 10 == 0) {
      ASSERT_OK(Flush());
    }
  }

  // Wait for the 1st background compaction process to start
  TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:0");
  TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:1");
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearTrace();

  // Write 3 files while 1st compaction is held
  // These 3 files have different sizes to avoid compacting based on size_ratio
  int num_keys = 1000;
  for (int i = 0; i < 3; i++) {
    for (int j = 1; j <= num_keys; j++) {
      std::string k = Key(key_idx++);
      ASSERT_OK(Put(k, k));
    }
    ASSERT_OK(Flush());
    num_keys -= 100;
  }

  // Hold the 1st compaction from finishing
  TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:2");
  ASSERT_OK(dbfull()->TEST_WaitForCompact());

  // There should only be one picked compaction as the score drops below one
  // after the first one is picked.
  EXPECT_EQ(total_picked_compactions, 1);
  EXPECT_EQ(TotalTableFiles(), 4);

  // Stop SyncPoint and destroy the DB and reopen it again
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearTrace();
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
  key_idx = 1;
  total_picked_compactions = 0;
  DestroyAndReopen(options);

  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();

  // Write 7 files to trigger compaction
  for (int i = 1; i <= 70; i++) {
    std::string k = Key(key_idx++);
    ASSERT_OK(Put(k, k));
    if (i % 10 == 0) {
      ASSERT_OK(Flush());
    }
  }

  // Wait for the 1st background compaction process to start
  TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:0");
  TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:1");
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearTrace();

  // Write 8 files while 1st compaction is held
  // These 8 files have different sizes to avoid compacting based on size_ratio
  num_keys = 1000;
  for (int i = 0; i < 8; i++) {
    for (int j = 1; j <= num_keys; j++) {
      std::string k = Key(key_idx++);
      ASSERT_OK(Put(k, k));
    }
    ASSERT_OK(Flush());
    num_keys -= 100;
  }

  // Wait for the 2nd background compaction process to start
  TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:0");
  TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:1");

  // Hold the 1st and 2nd compaction from finishing
  TEST_SYNC_POINT("DBTestUniversalCompactionParallel::PickByFileNumberBug:2");
  ASSERT_OK(dbfull()->TEST_WaitForCompact());

  // This time we will trigger a compaction because of size ratio and
  // another compaction because of number of files that are not compacted
  // greater than 7
  EXPECT_GE(total_picked_compactions, 2);
}
// Instantiate the parallel suite with num_levels_ in {1, 10}; the fixture's
// bool parameter is fixed to false for these tests.
INSTANTIATE_TEST_CASE_P(Parallel, DBTestUniversalCompactionParallel,
                        ::testing::Combine(::testing::Values(1, 10),
                                           ::testing::Values(false)));
  789. #endif // !defined(ROCKSDB_VALGRIND_RUN) || defined(ROCKSDB_FULL_VALGRIND_RUN)
  790. TEST_P(DBTestUniversalCompaction, UniversalCompactionOptions) {
  791. Options options = CurrentOptions();
  792. options.compaction_style = kCompactionStyleUniversal;
  793. options.write_buffer_size = 105 << 10; // 105KB
  794. options.arena_block_size = 4 << 10; // 4KB
  795. options.target_file_size_base = 32 << 10; // 32KB
  796. options.level0_file_num_compaction_trigger = 4;
  797. options.num_levels = num_levels_;
  798. options.compaction_options_universal.compression_size_percent = -1;
  799. DestroyAndReopen(options);
  800. CreateAndReopenWithCF({"pikachu"}, options);
  801. Random rnd(301);
  802. int key_idx = 0;
  803. for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) {
  804. // Write 100KB (100 values, each 1K)
  805. for (int i = 0; i < 100; i++) {
  806. ASSERT_OK(Put(1, Key(key_idx), rnd.RandomString(990)));
  807. key_idx++;
  808. }
  809. ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[1]));
  810. if (num < options.level0_file_num_compaction_trigger - 1) {
  811. ASSERT_EQ(NumSortedRuns(1), num + 1);
  812. }
  813. }
  814. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  815. ASSERT_EQ(NumSortedRuns(1), 1);
  816. }
// Exercises kCompactionStopStyleSimilarSize: compaction input selection
// stops when the next candidate file's size diverges too much, so sorted
// runs of dissimilar sizes are left alone.
TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 105 << 10;     // 105KB
  options.arena_block_size = 4 << 10;        // 4KB
  options.target_file_size_base = 32 << 10;  // 32KB
  // trigger compaction if there are >= 4 files
  options.level0_file_num_compaction_trigger = 4;
  options.compaction_options_universal.size_ratio = 10;
  options.compaction_options_universal.stop_style =
      kCompactionStopStyleSimilarSize;
  options.num_levels = num_levels_;
  DestroyAndReopen(options);

  Random rnd(301);
  int key_idx = 0;

  // Stage 1:
  // Generate a set of files at level 0, but don't trigger level-0
  // compaction.
  for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
       num++) {
    // Write 100KB (100 values, each ~1K)
    for (int i = 0; i < 100; i++) {
      ASSERT_OK(Put(Key(key_idx), rnd.RandomString(990)));
      key_idx++;
    }
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
    ASSERT_EQ(NumSortedRuns(), num + 1);
  }

  // Generate one more file at level-0, which should trigger level-0
  // compaction.
  for (int i = 0; i < 100; i++) {
    ASSERT_OK(Put(Key(key_idx), rnd.RandomString(990)));
    key_idx++;
  }
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  // Suppose each file flushed from mem table has size 1. Now we compact
  // level0_file_num_compaction_trigger=4 files and should have a big
  // file of size 4.
  ASSERT_EQ(NumSortedRuns(), 1);

  // Stage 2:
  // Now we have one file at level 0, with size 4. We also have some data in
  // mem table. Let's continue generating new files at level 0, but don't
  // trigger level-0 compaction.
  // First, clean up memtable before inserting new data. This will generate
  // a level-0 file, with size around 0.4 (according to previously written
  // data amount).
  ASSERT_OK(dbfull()->Flush(FlushOptions()));
  for (int num = 0; num < options.level0_file_num_compaction_trigger - 3;
       num++) {
    // Write 100KB (100 values, each ~1K)
    for (int i = 0; i < 100; i++) {
      ASSERT_OK(Put(Key(key_idx), rnd.RandomString(990)));
      key_idx++;
    }
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
    ASSERT_EQ(NumSortedRuns(), num + 3);
  }

  // Generate one more file at level-0, which should trigger level-0
  // compaction.
  for (int i = 0; i < 100; i++) {
    ASSERT_OK(Put(Key(key_idx), rnd.RandomString(990)));
    key_idx++;
  }
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  // Before compaction, we have 4 files at level 0, with size 4, 0.4, 1, 1.
  // After compaction, we should have 3 files, with size 4, 0.4, 2.
  // (Similar-size stop style keeps the 4 and 0.4 files out of the input.)
  ASSERT_EQ(NumSortedRuns(), 3);

  // Stage 3:
  // Now we have 3 files at level 0, with size 4, 0.4, 2. Generate one
  // more file at level-0, which should trigger level-0 compaction.
  for (int i = 0; i < 100; i++) {
    ASSERT_OK(Put(Key(key_idx), rnd.RandomString(990)));
    key_idx++;
  }
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  // Level-0 compaction is triggered, but no file will be picked up.
  ASSERT_EQ(NumSortedRuns(), 4);
}
// Verifies compression_size_percent = 70: a compaction's output is
// compressed only while the data it covers stays within 70% of the total;
// the size assertions below distinguish compressed from uncompressed
// outputs using snappy-compressible values.
TEST_P(DBTestUniversalCompaction, UniversalCompactionCompressRatio1) {
  // Skip silently when snappy is not compiled in.
  if (!Snappy_Supported()) {
    return;
  }

  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 100 << 10;     // 100KB
  options.target_file_size_base = 32 << 10;  // 32KB
  options.level0_file_num_compaction_trigger = 2;
  options.num_levels = num_levels_;
  options.compaction_options_universal.compression_size_percent = 70;
  DestroyAndReopen(options);

  Random rnd(301);
  int key_idx = 0;

  // The first compaction (2) is compressed.
  for (int num = 0; num < 2; num++) {
    // Write 110KB (11 values, each 10K)
    for (int i = 0; i < 11; i++) {
      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
      key_idx++;
    }
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
  ASSERT_LT(TotalSize(), 110000U * 2 * 0.9);

  // The second compaction (4) is compressed
  for (int num = 0; num < 2; num++) {
    // Write 110KB (11 values, each 10K)
    for (int i = 0; i < 11; i++) {
      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
      key_idx++;
    }
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
  ASSERT_LT(TotalSize(), 110000 * 4 * 0.9);

  // The third compaction (2 4) is compressed since this time it is
  // (1 1 3.2) and 3.2/5.2 doesn't reach ratio.
  for (int num = 0; num < 2; num++) {
    // Write 110KB (11 values, each 10K)
    for (int i = 0; i < 11; i++) {
      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
      key_idx++;
    }
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
  ASSERT_LT(TotalSize(), 110000 * 6 * 0.9);

  // When we start for the compaction up to (2 4 8), the latest
  // compressed is not compressed.
  for (int num = 0; num < 8; num++) {
    // Write 110KB (11 values, each 10K)
    for (int i = 0; i < 11; i++) {
      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
      key_idx++;
    }
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
  // Lower bound: the uncompressed portion keeps total size large.
  ASSERT_GT(TotalSize(), 110000 * 11 * 0.8 + 110000 * 2);
}
  956. TEST_P(DBTestUniversalCompaction, UniversalCompactionCompressRatio2) {
  957. if (!Snappy_Supported()) {
  958. return;
  959. }
  960. Options options = CurrentOptions();
  961. options.compaction_style = kCompactionStyleUniversal;
  962. options.write_buffer_size = 100 << 10; // 100KB
  963. options.target_file_size_base = 32 << 10; // 32KB
  964. options.level0_file_num_compaction_trigger = 2;
  965. options.num_levels = num_levels_;
  966. options.compaction_options_universal.compression_size_percent = 95;
  967. DestroyAndReopen(options);
  968. Random rnd(301);
  969. int key_idx = 0;
  970. // When we start for the compaction up to (2 4 8), the latest
  971. // compressed is compressed given the size ratio to compress.
  972. for (int num = 0; num < 14; num++) {
  973. // Write 120KB (12 values, each 10K)
  974. for (int i = 0; i < 12; i++) {
  975. ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
  976. key_idx++;
  977. }
  978. ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
  979. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  980. }
  981. ASSERT_LT(TotalSize(), 120000U * 12 * 0.82 + 120000 * 2);
  982. }
  983. #if !defined(ROCKSDB_VALGRIND_RUN) || defined(ROCKSDB_FULL_VALGRIND_RUN)
// Test that checks trivial move in universal compaction
TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest1) {
  int32_t trivial_move = 0;
  int32_t non_trivial_move = 0;
  // Count trivial vs. non-trivial compactions via sync-point callbacks.
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::BackgroundCompaction:TrivialMove",
      [&](void* /*arg*/) { trivial_move++; });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::BackgroundCompaction:NonTrivial", [&](void* arg) {
        non_trivial_move++;
        ASSERT_TRUE(arg != nullptr);
        // Output level reported by the sync point must be 0 here.
        int output_level = *(static_cast<int*>(arg));
        ASSERT_EQ(output_level, 0);
      });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();

  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.compaction_options_universal.allow_trivial_move = true;
  options.num_levels = 2;
  options.write_buffer_size = 100 << 10;  // 100KB
  options.level0_file_num_compaction_trigger = 3;
  options.max_background_compactions = 1;
  options.target_file_size_base = 32 * 1024;
  DestroyAndReopen(options);
  CreateAndReopenWithCF({"pikachu"}, options);

  // Trigger compaction if size amplification exceeds 110%
  options.compaction_options_universal.max_size_amplification_percent = 110;
  options = CurrentOptions(options);
  ReopenWithColumnFamilies({"default", "pikachu"}, options);

  Random rnd(301);  // NOTE(review): unused; values are Key(i), not random.
  int num_keys = 250000;
  for (int i = 0; i < num_keys; i++) {
    ASSERT_OK(Put(1, Key(i), Key(i)));
  }
  std::vector<std::string> values;  // NOTE(review): unused, kept as-is.

  ASSERT_OK(Flush(1));
  ASSERT_OK(dbfull()->TEST_WaitForCompact());

  // Both kinds of compactions must have happened at least once.
  ASSERT_GT(trivial_move, 0);
  ASSERT_GT(non_trivial_move, 0);

  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
// Test that checks trivial move in universal compaction
// (variant with many levels; only trivial moves are required to occur).
TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest2) {
  int32_t trivial_move = 0;
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::BackgroundCompaction:TrivialMove",
      [&](void* /*arg*/) { trivial_move++; });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::BackgroundCompaction:NonTrivial", [&](void* arg) {
        ASSERT_TRUE(arg != nullptr);
        // Output level reported by the sync point must be 0 here.
        int output_level = *(static_cast<int*>(arg));
        ASSERT_EQ(output_level, 0);
      });

  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();

  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.compaction_options_universal.allow_trivial_move = true;
  options.num_levels = 15;
  options.write_buffer_size = 100 << 10;  // 100KB
  options.level0_file_num_compaction_trigger = 8;
  options.max_background_compactions = 2;
  options.target_file_size_base = 64 * 1024;
  DestroyAndReopen(options);
  CreateAndReopenWithCF({"pikachu"}, options);

  // Trigger compaction if size amplification exceeds 110%
  options.compaction_options_universal.max_size_amplification_percent = 110;
  options = CurrentOptions(options);
  ReopenWithColumnFamilies({"default", "pikachu"}, options);

  Random rnd(301);  // NOTE(review): unused; values are Key(i), not random.
  int num_keys = 500000;
  for (int i = 0; i < num_keys; i++) {
    ASSERT_OK(Put(1, Key(i), Key(i)));
  }
  std::vector<std::string> values;  // NOTE(review): unused, kept as-is.

  ASSERT_OK(Flush(1));
  ASSERT_OK(dbfull()->TEST_WaitForCompact());

  // At least one trivial move must have happened.
  ASSERT_GT(trivial_move, 0);

  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
  1063. #endif // !defined(ROCKSDB_VALGRIND_RUN) || defined(ROCKSDB_FULL_VALGRIND_RUN)
// Verifies SST placement across four db_paths with increasing capacities
// (300K, 300K, 500K, 1G): as compaction outputs grow they must land in the
// first path large enough to hold them. The (a, b, ...) comments track the
// approximate file sizes (in 100KB units) present before each step.
TEST_P(DBTestUniversalCompaction, UniversalCompactionFourPaths) {
  Options options = CurrentOptions();
  options.db_paths.emplace_back(dbname_, 300 * 1024);
  options.db_paths.emplace_back(dbname_ + "_2", 300 * 1024);
  options.db_paths.emplace_back(dbname_ + "_3", 500 * 1024);
  options.db_paths.emplace_back(dbname_ + "_4", 1024 * 1024 * 1024);
  options.memtable_factory.reset(
      test::NewSpecialSkipListFactory(KNumKeysByGenerateNewFile - 1));
  options.compaction_style = kCompactionStyleUniversal;
  options.compaction_options_universal.size_ratio = 5;
  options.write_buffer_size = 111 << 10;  // 111 KiB (~114KB)
  options.arena_block_size = 4 << 10;
  options.level0_file_num_compaction_trigger = 2;
  options.num_levels = 1;

  // Clean out any leftover files in the second path from earlier runs.
  std::vector<std::string> filenames;
  if (env_->GetChildren(options.db_paths[1].path, &filenames).ok()) {
    // Delete archival files.
    for (size_t i = 0; i < filenames.size(); ++i) {
      ASSERT_OK(
          env_->DeleteFile(options.db_paths[1].path + "/" + filenames[i]));
    }
    ASSERT_OK(env_->DeleteDir(options.db_paths[1].path));
  }
  Reopen(options);

  Random rnd(301);
  int key_idx = 0;

  // First three 110KB files are not going to second path.
  // After that, (100K, 200K)
  for (int num = 0; num < 3; num++) {
    GenerateNewFile(&rnd, &key_idx);
  }

  // Another 110KB triggers a compaction to 400K file to second path
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));

  // (1, 4)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
  ASSERT_EQ(1, GetSstFileCount(dbname_));

  // (1,1,4) -> (2, 4)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(0, GetSstFileCount(dbname_));

  // (1, 2, 4) -> (3, 4)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(0, GetSstFileCount(dbname_));

  // (1, 3, 4) -> (8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));

  // (1, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
  ASSERT_EQ(1, GetSstFileCount(dbname_));

  // (1, 1, 8) -> (2, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));

  // (1, 2, 8) -> (3, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(0, GetSstFileCount(dbname_));

  // (1, 3, 8) -> (4, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));

  // (1, 4, 8) -> (5, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
  ASSERT_EQ(0, GetSstFileCount(dbname_));

  // All keys must remain readable, both before and after a reopen.
  for (int i = 0; i < key_idx; i++) {
    auto v = Get(Key(i));
    ASSERT_NE(v, "NOT_FOUND");
    ASSERT_TRUE(v.size() == 1 || v.size() == 990);
  }

  Reopen(options);

  for (int i = 0; i < key_idx; i++) {
    auto v = Get(Key(i));
    ASSERT_NE(v, "NOT_FOUND");
    ASSERT_TRUE(v.size() == 1 || v.size() == 990);
  }

  Destroy(options);
}
  1150. TEST_P(DBTestUniversalCompaction, UniversalCompactionCFPathUse) {
  1151. Options options = CurrentOptions();
  1152. options.db_paths.emplace_back(dbname_, 300 * 1024);
  1153. options.db_paths.emplace_back(dbname_ + "_2", 300 * 1024);
  1154. options.db_paths.emplace_back(dbname_ + "_3", 500 * 1024);
  1155. options.db_paths.emplace_back(dbname_ + "_4", 1024 * 1024 * 1024);
  1156. options.memtable_factory.reset(
  1157. test::NewSpecialSkipListFactory(KNumKeysByGenerateNewFile - 1));
  1158. options.compaction_style = kCompactionStyleUniversal;
  1159. options.compaction_options_universal.size_ratio = 10;
  1160. options.write_buffer_size = 111 << 10; // 114KB
  1161. options.arena_block_size = 4 << 10;
  1162. options.level0_file_num_compaction_trigger = 2;
  1163. options.num_levels = 1;
  1164. std::vector<Options> option_vector;
  1165. option_vector.emplace_back(options);
  1166. ColumnFamilyOptions cf_opt1(options), cf_opt2(options);
  1167. // Configure CF1 specific paths.
  1168. cf_opt1.cf_paths.emplace_back(dbname_ + "cf1", 300 * 1024);
  1169. cf_opt1.cf_paths.emplace_back(dbname_ + "cf1_2", 300 * 1024);
  1170. cf_opt1.cf_paths.emplace_back(dbname_ + "cf1_3", 500 * 1024);
  1171. cf_opt1.cf_paths.emplace_back(dbname_ + "cf1_4", 1024 * 1024 * 1024);
  1172. option_vector.emplace_back(DBOptions(options), cf_opt1);
  1173. CreateColumnFamilies({"one"}, option_vector[1]);
  1174. // Configura CF2 specific paths.
  1175. cf_opt2.cf_paths.emplace_back(dbname_ + "cf2", 300 * 1024);
  1176. cf_opt2.cf_paths.emplace_back(dbname_ + "cf2_2", 300 * 1024);
  1177. cf_opt2.cf_paths.emplace_back(dbname_ + "cf2_3", 500 * 1024);
  1178. cf_opt2.cf_paths.emplace_back(dbname_ + "cf2_4", 1024 * 1024 * 1024);
  1179. option_vector.emplace_back(DBOptions(options), cf_opt2);
  1180. CreateColumnFamilies({"two"}, option_vector[2]);
  1181. ReopenWithColumnFamilies({"default", "one", "two"}, option_vector);
  1182. Random rnd(301);
  1183. int key_idx = 0;
  1184. int key_idx1 = 0;
  1185. int key_idx2 = 0;
  1186. auto generate_file = [&]() {
  1187. GenerateNewFile(0, &rnd, &key_idx);
  1188. GenerateNewFile(1, &rnd, &key_idx1);
  1189. GenerateNewFile(2, &rnd, &key_idx2);
  1190. };
  1191. auto check_sstfilecount = [&](int path_id, int expected) {
  1192. ASSERT_EQ(expected, GetSstFileCount(options.db_paths[path_id].path));
  1193. ASSERT_EQ(expected, GetSstFileCount(cf_opt1.cf_paths[path_id].path));
  1194. ASSERT_EQ(expected, GetSstFileCount(cf_opt2.cf_paths[path_id].path));
  1195. };
  1196. auto check_getvalues = [&]() {
  1197. for (int i = 0; i < key_idx; i++) {
  1198. auto v = Get(0, Key(i));
  1199. ASSERT_NE(v, "NOT_FOUND");
  1200. ASSERT_TRUE(v.size() == 1 || v.size() == 990);
  1201. }
  1202. for (int i = 0; i < key_idx1; i++) {
  1203. auto v = Get(1, Key(i));
  1204. ASSERT_NE(v, "NOT_FOUND");
  1205. ASSERT_TRUE(v.size() == 1 || v.size() == 990);
  1206. }
  1207. for (int i = 0; i < key_idx2; i++) {
  1208. auto v = Get(2, Key(i));
  1209. ASSERT_NE(v, "NOT_FOUND");
  1210. ASSERT_TRUE(v.size() == 1 || v.size() == 990);
  1211. }
  1212. };
  1213. // First three 110KB files are not going to second path.
  1214. // After that, (100K, 200K)
  1215. for (int num = 0; num < 3; num++) {
  1216. generate_file();
  1217. }
  1218. // Another 110KB triggers a compaction to 400K file to second path
  1219. generate_file();
  1220. check_sstfilecount(2, 1);
  1221. // (1, 4)
  1222. generate_file();
  1223. check_sstfilecount(2, 1);
  1224. check_sstfilecount(0, 1);
  1225. // (1,1,4) -> (2, 4)
  1226. generate_file();
  1227. check_sstfilecount(2, 1);
  1228. check_sstfilecount(1, 1);
  1229. check_sstfilecount(0, 0);
  1230. // (1, 2, 4) -> (3, 4)
  1231. generate_file();
  1232. check_sstfilecount(2, 1);
  1233. check_sstfilecount(1, 1);
  1234. check_sstfilecount(0, 0);
  1235. // (1, 3, 4) -> (8)
  1236. generate_file();
  1237. check_sstfilecount(3, 1);
  1238. // (1, 8)
  1239. generate_file();
  1240. check_sstfilecount(3, 1);
  1241. check_sstfilecount(0, 1);
  1242. // (1, 1, 8) -> (2, 8)
  1243. generate_file();
  1244. check_sstfilecount(3, 1);
  1245. check_sstfilecount(1, 1);
  1246. // (1, 2, 8) -> (3, 8)
  1247. generate_file();
  1248. check_sstfilecount(3, 1);
  1249. check_sstfilecount(1, 1);
  1250. check_sstfilecount(0, 0);
  1251. // (1, 3, 8) -> (4, 8)
  1252. generate_file();
  1253. check_sstfilecount(2, 1);
  1254. check_sstfilecount(3, 1);
  1255. // (1, 4, 8) -> (5, 8)
  1256. generate_file();
  1257. check_sstfilecount(3, 1);
  1258. check_sstfilecount(2, 1);
  1259. check_sstfilecount(0, 0);
  1260. check_getvalues();
  1261. ReopenWithColumnFamilies({"default", "one", "two"}, option_vector);
  1262. check_getvalues();
  1263. Destroy(options, true);
  1264. }
TEST_P(DBTestUniversalCompaction, IncreaseUniversalCompactionNumLevels) {
  // Verifies that a universal-compaction DB can be reopened with a larger
  // num_levels (1 -> 4) and later shrunk back to num_levels=1 without losing
  // data. verify_func checks that CF 1 contains exactly keys [0,
  // num_keys_in_db], in order.
  std::function<void(int)> verify_func = [&](int num_keys_in_db) {
    std::string keys_in_db;
    Iterator* iter = dbfull()->NewIterator(ReadOptions(), handles_[1]);
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      keys_in_db.append(iter->key().ToString());
      keys_in_db.push_back(',');
    }
    EXPECT_OK(iter->status());
    delete iter;
    std::string expected_keys;
    for (int i = 0; i <= num_keys_in_db; i++) {
      expected_keys.append(Key(i));
      expected_keys.push_back(',');
    }
    ASSERT_EQ(keys_in_db, expected_keys);
  };

  Random rnd(301);
  int max_key1 = 200;
  int max_key2 = 600;
  int max_key3 = 800;
  const int KNumKeysPerFile = 10;

  // Stage 1: open a DB with universal compaction, num_levels=1
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.num_levels = 1;
  options.write_buffer_size = 200 << 10;  // 200KB
  options.level0_file_num_compaction_trigger = 3;
  options.memtable_factory.reset(
      test::NewSpecialSkipListFactory(KNumKeysPerFile));
  options = CurrentOptions(options);
  CreateAndReopenWithCF({"pikachu"}, options);
  for (int i = 0; i <= max_key1; i++) {
    // each value is 10K
    ASSERT_OK(Put(1, Key(i), rnd.RandomString(10000)));
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[1]));
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
  ASSERT_OK(Flush(1));
  ASSERT_OK(dbfull()->TEST_WaitForCompact());

  // Stage 2: reopen with universal compaction, num_levels=4
  options.compaction_style = kCompactionStyleUniversal;
  options.num_levels = 4;
  options = CurrentOptions(options);
  ReopenWithColumnFamilies({"default", "pikachu"}, options);
  verify_func(max_key1);

  // Insert more keys
  for (int i = max_key1 + 1; i <= max_key2; i++) {
    // each value is 10K
    ASSERT_OK(Put(1, Key(i), rnd.RandomString(10000)));
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[1]));
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
  ASSERT_OK(Flush(1));
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  verify_func(max_key2);

  // Compaction to non-L0 has happened.
  ASSERT_GT(NumTableFilesAtLevel(options.num_levels - 1, 1), 0);

  // Stage 3: compact all data back into L0 (still with num_levels=4) so the
  // DB can be reopened with num_levels=1 again.
  options.num_levels = 4;
  options.target_file_size_base = INT_MAX;
  ReopenWithColumnFamilies({"default", "pikachu"}, options);
  // Compact all to level 0
  CompactRangeOptions compact_options;
  compact_options.change_level = true;
  compact_options.target_level = 0;
  compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
  ASSERT_OK(
      dbfull()->CompactRange(compact_options, handles_[1], nullptr, nullptr));
  // Need to restart it once to remove higher level records in manifest.
  ReopenWithColumnFamilies({"default", "pikachu"}, options);
  // Final reopen with num_levels=1.
  options.compaction_style = kCompactionStyleUniversal;
  options.num_levels = 1;
  options = CurrentOptions(options);
  ReopenWithColumnFamilies({"default", "pikachu"}, options);

  // Insert more keys
  for (int i = max_key2 + 1; i <= max_key3; i++) {
    // each value is 10K
    ASSERT_OK(Put(1, Key(i), rnd.RandomString(10000)));
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[1]));
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
  ASSERT_OK(Flush(1));
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  verify_func(max_key3);
}
TEST_P(DBTestUniversalCompaction, UniversalCompactionSecondPathRatio) {
  // Verifies db_paths placement for universal compaction: files are written
  // to the first path (capped at 500KB) only while they fit under its size
  // limit, and larger compaction outputs spill to the second path. The
  // comments below track the sorted-run sizes (in ~100KB units) after each
  // generated file.
  if (!Snappy_Supported()) {
    return;
  }
  Options options = CurrentOptions();
  options.db_paths.emplace_back(dbname_, 500 * 1024);
  options.db_paths.emplace_back(dbname_ + "_2", 1024 * 1024 * 1024);
  options.compaction_style = kCompactionStyleUniversal;
  options.compaction_options_universal.size_ratio = 5;
  options.write_buffer_size = 111 << 10;  // 114KB
  options.arena_block_size = 4 << 10;
  options.level0_file_num_compaction_trigger = 2;
  options.num_levels = 1;
  options.memtable_factory.reset(
      test::NewSpecialSkipListFactory(KNumKeysByGenerateNewFile - 1));

  // Clean out any leftover files in the second path from a previous run.
  std::vector<std::string> filenames;
  if (env_->GetChildren(options.db_paths[1].path, &filenames).ok()) {
    // Delete archival files.
    for (size_t i = 0; i < filenames.size(); ++i) {
      ASSERT_OK(
          env_->DeleteFile(options.db_paths[1].path + "/" + filenames[i]));
    }
    ASSERT_OK(env_->DeleteDir(options.db_paths[1].path));
  }
  Reopen(options);

  Random rnd(301);
  int key_idx = 0;

  // First three 110KB files are not going to second path.
  // After that, (100K, 200K)
  for (int num = 0; num < 3; num++) {
    GenerateNewFile(&rnd, &key_idx);
  }

  // Another 110KB triggers a compaction to 400K file to second path
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));

  // (1, 4)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(1, GetSstFileCount(dbname_));

  // (1,1,4) -> (2, 4)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(1, GetSstFileCount(dbname_));

  // (1, 2, 4) -> (3, 4)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(2, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(0, GetSstFileCount(dbname_));

  // (1, 3, 4) -> (8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(0, GetSstFileCount(dbname_));

  // (1, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(1, GetSstFileCount(dbname_));

  // (1, 1, 8) -> (2, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(1, GetSstFileCount(dbname_));

  // (1, 2, 8) -> (3, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(2, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(0, GetSstFileCount(dbname_));

  // (1, 3, 8) -> (4, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(2, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(0, GetSstFileCount(dbname_));

  // (1, 4, 8) -> (5, 8)
  GenerateNewFile(&rnd, &key_idx);
  ASSERT_EQ(2, GetSstFileCount(options.db_paths[1].path));
  ASSERT_EQ(0, GetSstFileCount(dbname_));

  // All written keys must be readable, both before and after reopen.
  for (int i = 0; i < key_idx; i++) {
    auto v = Get(Key(i));
    ASSERT_NE(v, "NOT_FOUND");
    ASSERT_TRUE(v.size() == 1 || v.size() == 990);
  }

  Reopen(options);

  for (int i = 0; i < key_idx; i++) {
    auto v = Get(Key(i));
    ASSERT_NE(v, "NOT_FOUND");
    ASSERT_TRUE(v.size() == 1 || v.size() == 990);
  }

  Destroy(options);
}
TEST_P(DBTestUniversalCompaction, ConcurrentBottomPriLowPriCompactions) {
  // Verifies that a full (bottom-priority) compaction and a later L0
  // compaction (low-priority pool) can run in parallel, for both settings of
  // compaction_options_universal.reduce_file_locking. Sync points pin the
  // ordering: the full compaction is picked/forwarded first, blocked until
  // the second batch of files is flushed, then both must be running at once.
  if (num_levels_ == 1) {
    // for single-level universal, everything's bottom level so nothing should
    // be executed in bottom-pri thread pool.
    return;
  }
  const int kNumFilesTrigger = 3;
  Env::Default()->SetBackgroundThreads(1, Env::Priority::BOTTOM);
  for (bool universal_reduce_file_locking : {true, false}) {
    Options options = CurrentOptions();
    options.compaction_style = kCompactionStyleUniversal;
    options.compaction_options_universal.reduce_file_locking =
        universal_reduce_file_locking;
    options.max_background_compactions = 2;
    options.num_levels = num_levels_;
    options.write_buffer_size = 100 << 10;     // 100KB
    options.target_file_size_base = 32 << 10;  // 32KB
    options.level0_file_num_compaction_trigger = kNumFilesTrigger;
    // Trigger compaction if size amplification exceeds 110%
    options.compaction_options_universal.max_size_amplification_percent = 110;
    DestroyAndReopen(options);

    // Need to get a token to enable compaction parallelism up to
    // `max_background_compactions` jobs.
    auto pressure_token =
        dbfull()->TEST_write_controler().GetCompactionPressureToken();

    if (universal_reduce_file_locking) {
      ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
          {// Wait for the full compaction to be repicked before adding files
           // intended for the second compaction.
           {"DBImpl::BackgroundCompaction():AfterPickCompactionBottomPri",
            "DBTestUniversalCompaction:ConcurrentBottomPriLowPriCompactions:0"},
           // Wait for the second compaction to run before running the full
           // compaction to verify they can run in parallel
           {"DBImpl::BackgroundCompaction:NonTrivial:BeforeRun",
            "DBImpl::BackgroundCompaction:NonTrivial:BeforeRunBottomPri"}});
    } else {
      ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
          {// Wait for the full compaction to be forwarded before adding files
           // intended for the second compaction.
           {"DBImpl::BackgroundCompaction:ForwardToBottomPriPool",
            "DBTestUniversalCompaction:ConcurrentBottomPriLowPriCompactions:0"},
           // Wait for the second compaction to run before running the full
           // compaction to verify they can run in parallel
           {"DBImpl::BackgroundCompaction:NonTrivial:BeforeRun",
            "DBImpl::BackgroundCompaction:NonTrivial:BeforeRunBottomPri"}});
    }
    SyncPoint::GetInstance()->EnableProcessing();

    Random rnd(301);
    for (int i = 0; i < 2; ++i) {
      for (int num = 0; num < kNumFilesTrigger; num++) {
        int key_idx = 0;
        GenerateNewFile(&rnd, &key_idx, true /* no_wait */);
        // use no_wait above because that one waits for flush and compaction. We
        // don't want to wait for compaction because the full compaction is
        // intentionally blocked while more files are flushed.
        ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
      }
      if (i == 0) {
        TEST_SYNC_POINT(
            "DBTestUniversalCompaction:ConcurrentBottomPriLowPriCompactions:0");
      }
    }
    ASSERT_OK(dbfull()->TEST_WaitForCompact());

    // First compaction should output to bottom level. Second should output to
    // L0 since older L0 files pending compaction prevent it from being placed
    // lower.
    ASSERT_EQ(NumSortedRuns(), 2);
    ASSERT_GT(NumTableFilesAtLevel(0), 0);
    ASSERT_GT(NumTableFilesAtLevel(num_levels_ - 1), 0);
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
  }
  Env::Default()->SetBackgroundThreads(0, Env::Priority::BOTTOM);
}
  1509. TEST_P(DBTestUniversalCompaction, RecalculateScoreAfterPicking) {
  1510. // Regression test for extra compactions scheduled. Once enough compactions
  1511. // have been scheduled to bring the score below one, we should stop
  1512. // scheduling more; otherwise, other CFs/DBs may be delayed unnecessarily.
  1513. const int kNumFilesTrigger = 8;
  1514. Options options = CurrentOptions();
  1515. options.memtable_factory.reset(
  1516. test::NewSpecialSkipListFactory(KNumKeysByGenerateNewFile - 1));
  1517. options.compaction_options_universal.max_merge_width = kNumFilesTrigger / 2;
  1518. options.compaction_options_universal.max_size_amplification_percent =
  1519. static_cast<unsigned int>(-1);
  1520. options.compaction_style = kCompactionStyleUniversal;
  1521. options.level0_file_num_compaction_trigger = kNumFilesTrigger;
  1522. options.num_levels = num_levels_;
  1523. Reopen(options);
  1524. std::atomic<int> num_compactions_attempted(0);
  1525. ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
  1526. "DBImpl::BackgroundCompaction:Start",
  1527. [&](void* /*arg*/) { ++num_compactions_attempted; });
  1528. ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
  1529. Random rnd(301);
  1530. for (int num = 0; num < kNumFilesTrigger; num++) {
  1531. ASSERT_EQ(NumSortedRuns(), num);
  1532. int key_idx = 0;
  1533. GenerateNewFile(&rnd, &key_idx);
  1534. }
  1535. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  1536. // Compacting the first four files was enough to bring the score below one so
  1537. // there's no need to schedule any more compactions.
  1538. ASSERT_EQ(1, num_compactions_attempted);
  1539. ASSERT_EQ(NumSortedRuns(), 5);
  1540. }
TEST_P(DBTestUniversalCompaction, FinalSortedRunCompactFilesConflict) {
  // Regression test for conflict between:
  // (1) Running CompactFiles including file in the final sorted run; and
  // (2) Picking universal size-amp-triggered compaction, which always includes
  //     the final sorted run.
  if (exclusive_manual_compaction_) {
    return;
  }
  Options opts = CurrentOptions();
  opts.compaction_style = kCompactionStyleUniversal;
  opts.compaction_options_universal.max_size_amplification_percent = 50;
  opts.compaction_options_universal.min_merge_width = 2;
  opts.compression = kNoCompression;
  opts.level0_file_num_compaction_trigger = 2;
  opts.max_background_compactions = 2;
  opts.num_levels = num_levels_;
  Reopen(opts);

  // make sure compaction jobs can be parallelized
  auto stop_token =
      dbfull()->TEST_write_controler().GetCompactionPressureToken();

  // Create the single file that forms the final sorted run, and record its
  // name so CompactFiles can be pointed at it.
  ASSERT_OK(Put("key", "val"));
  ASSERT_OK(Flush());
  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  ASSERT_EQ(NumTableFilesAtLevel(num_levels_ - 1), 1);
  ColumnFamilyMetaData cf_meta;
  ColumnFamilyHandle* default_cfh = db_->DefaultColumnFamily();
  dbfull()->GetColumnFamilyMetaData(default_cfh, &cf_meta);
  ASSERT_EQ(1, cf_meta.levels[num_levels_ - 1].files.size());
  std::string first_sst_filename =
      cf_meta.levels[num_levels_ - 1].files[0].name;

  // Block the automatic compaction picker until CompactFiles has claimed the
  // final-sorted-run file, and hold CompactFiles until the picker has run.
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
      {{"CompactFilesImpl:0",
        "DBTestUniversalCompaction:FinalSortedRunCompactFilesConflict:0"},
       {"DBImpl::BackgroundCompaction():AfterPickCompaction",
        "CompactFilesImpl:1"}});
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();

  port::Thread compact_files_thread([&]() {
    ASSERT_OK(dbfull()->CompactFiles(CompactionOptions(), default_cfh,
                                     {first_sst_filename}, num_levels_ - 1));
  });

  TEST_SYNC_POINT(
      "DBTestUniversalCompaction:FinalSortedRunCompactFilesConflict:0");
  // Flush enough files to trigger an automatic compaction that would also
  // want to include the final sorted run.
  for (int i = 0; i < 2; ++i) {
    ASSERT_OK(Put("key", "val"));
    ASSERT_OK(Flush());
  }
  ASSERT_OK(dbfull()->TEST_WaitForCompact());

  compact_files_thread.join();
}
// Run DBTestUniversalCompaction with num_levels in {1, 3, 5} crossed with
// exclusive_manual_compaction in {true, false}.
INSTANTIATE_TEST_CASE_P(NumLevels, DBTestUniversalCompaction,
                        ::testing::Combine(::testing::Values(1, 3, 5),
                                           ::testing::Bool()));
// Parameterized fixture for manual-compaction output-path-id tests; uses a
// dedicated DB directory name so its files don't collide with the other
// universal-compaction tests.
class DBTestUniversalManualCompactionOutputPathId
    : public DBTestUniversalCompactionBase {
 public:
  DBTestUniversalManualCompactionOutputPathId()
      : DBTestUniversalCompactionBase(
            "/db_universal_compaction_manual_pid_test") {}
};
TEST_P(DBTestUniversalManualCompactionOutputPathId,
       ManualCompactionOutputPathId) {
  // Verifies CompactRangeOptions::target_path_id: manual compaction output
  // lands on the requested db_path, the placement survives reopen, and an
  // out-of-range path id fails with InvalidArgument.
  Options options = CurrentOptions();
  options.create_if_missing = true;
  options.db_paths.emplace_back(dbname_, 1000000000);
  options.db_paths.emplace_back(dbname_ + "_2", 1000000000);
  options.compaction_style = kCompactionStyleUniversal;
  options.num_levels = num_levels_;
  options.target_file_size_base = 1 << 30;  // Big size
  options.level0_file_num_compaction_trigger = 10;
  Destroy(options);
  DestroyAndReopen(options);
  CreateAndReopenWithCF({"pikachu"}, options);
  MakeTables(3, "p", "q", 1);
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ(2, TotalLiveFiles(1));
  ASSERT_EQ(2, GetSstFileCount(options.db_paths[0].path));
  ASSERT_EQ(0, GetSstFileCount(options.db_paths[1].path));

  // Full compaction to DB path 1
  CompactRangeOptions compact_options;
  compact_options.target_path_id = 1;
  compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
  ASSERT_OK(db_->CompactRange(compact_options, handles_[1], nullptr, nullptr));
  ASSERT_EQ(1, TotalLiveFiles(1));
  ASSERT_EQ(0, TotalLiveFilesAtPath(1, options.db_paths[0].path));
  ASSERT_EQ(1, TotalLiveFilesAtPath(1, options.db_paths[1].path));

  // Placement must survive a reopen.
  ReopenWithColumnFamilies({kDefaultColumnFamilyName, "pikachu"}, options);
  ASSERT_EQ(1, TotalLiveFiles(1));
  ASSERT_EQ(0, TotalLiveFilesAtPath(1, options.db_paths[0].path));
  ASSERT_EQ(1, TotalLiveFilesAtPath(1, options.db_paths[1].path));

  // A new flush goes to path 0 while the compacted file stays on path 1.
  MakeTables(1, "p", "q", 1);
  ASSERT_EQ(2, TotalLiveFiles(1));
  ASSERT_EQ(1, TotalLiveFilesAtPath(1, options.db_paths[0].path));
  ASSERT_EQ(1, TotalLiveFilesAtPath(1, options.db_paths[1].path));

  ReopenWithColumnFamilies({kDefaultColumnFamilyName, "pikachu"}, options);
  ASSERT_EQ(2, TotalLiveFiles(1));
  ASSERT_EQ(1, TotalLiveFilesAtPath(1, options.db_paths[0].path));
  ASSERT_EQ(1, TotalLiveFilesAtPath(1, options.db_paths[1].path));

  // Full compaction to DB path 0
  compact_options.target_path_id = 0;
  compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
  ASSERT_OK(db_->CompactRange(compact_options, handles_[1], nullptr, nullptr));
  ASSERT_EQ(1, TotalLiveFiles(1));
  ASSERT_EQ(1, TotalLiveFilesAtPath(1, options.db_paths[0].path));
  ASSERT_EQ(0, TotalLiveFilesAtPath(1, options.db_paths[1].path));

  // Fail when compacting to an invalid path ID
  compact_options.target_path_id = 2;
  compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
  ASSERT_TRUE(db_->CompactRange(compact_options, handles_[1], nullptr, nullptr)
                  .IsInvalidArgument());
}
// Run with num_levels in {1, 8} crossed with exclusive_manual_compaction in
// {true, false}.
INSTANTIATE_TEST_CASE_P(OutputPathId,
                        DBTestUniversalManualCompactionOutputPathId,
                        ::testing::Combine(::testing::Values(1, 8),
                                           ::testing::Bool()));
  1655. TEST_F(DBTestUniversalCompaction2, BasicL0toL1) {
  1656. const int kNumKeys = 3000;
  1657. const int kWindowSize = 100;
  1658. const int kNumDelsTrigger = 90;
  1659. Options opts = CurrentOptions();
  1660. opts.table_properties_collector_factories.emplace_back(
  1661. NewCompactOnDeletionCollectorFactory(kWindowSize, kNumDelsTrigger));
  1662. opts.compaction_style = kCompactionStyleUniversal;
  1663. opts.level0_file_num_compaction_trigger = 2;
  1664. opts.compression = kNoCompression;
  1665. opts.compaction_options_universal.size_ratio = 10;
  1666. opts.compaction_options_universal.min_merge_width = 2;
  1667. opts.compaction_options_universal.max_size_amplification_percent = 200;
  1668. Reopen(opts);
  1669. // add an L1 file to prevent tombstones from dropping due to obsolescence
  1670. // during flush
  1671. int i;
  1672. for (i = 0; i < 2000; ++i) {
  1673. ASSERT_OK(Put(Key(i), "val"));
  1674. }
  1675. ASSERT_OK(Flush());
  1676. // MoveFilesToLevel(6);
  1677. ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  1678. for (i = 1999; i < kNumKeys; ++i) {
  1679. if (i >= kNumKeys - kWindowSize &&
  1680. i < kNumKeys - kWindowSize + kNumDelsTrigger) {
  1681. ASSERT_OK(Delete(Key(i)));
  1682. } else {
  1683. ASSERT_OK(Put(Key(i), "val"));
  1684. }
  1685. }
  1686. ASSERT_OK(Flush());
  1687. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  1688. ASSERT_EQ(0, NumTableFilesAtLevel(0));
  1689. ASSERT_GT(NumTableFilesAtLevel(6), 0);
  1690. }
  1691. #if defined(ENABLE_SINGLE_LEVEL_DTC)
  1692. TEST_F(DBTestUniversalCompaction2, SingleLevel) {
  1693. const int kNumKeys = 3000;
  1694. const int kWindowSize = 100;
  1695. const int kNumDelsTrigger = 90;
  1696. Options opts = CurrentOptions();
  1697. opts.table_properties_collector_factories.emplace_back(
  1698. NewCompactOnDeletionCollectorFactory(kWindowSize, kNumDelsTrigger));
  1699. opts.compaction_style = kCompactionStyleUniversal;
  1700. opts.level0_file_num_compaction_trigger = 2;
  1701. opts.compression = kNoCompression;
  1702. opts.num_levels = 1;
  1703. opts.compaction_options_universal.size_ratio = 10;
  1704. opts.compaction_options_universal.min_merge_width = 2;
  1705. opts.compaction_options_universal.max_size_amplification_percent = 200;
  1706. Reopen(opts);
  1707. // add an L1 file to prevent tombstones from dropping due to obsolescence
  1708. // during flush
  1709. int i;
  1710. for (i = 0; i < 2000; ++i) {
  1711. ASSERT_OK(Put(Key(i), "val"));
  1712. }
  1713. ASSERT_OK(Flush());
  1714. for (i = 1999; i < kNumKeys; ++i) {
  1715. if (i >= kNumKeys - kWindowSize &&
  1716. i < kNumKeys - kWindowSize + kNumDelsTrigger) {
  1717. ASSERT_OK(Delete(Key(i)));
  1718. } else {
  1719. ASSERT_OK(Put(Key(i), "val"));
  1720. }
  1721. }
  1722. ASSERT_OK(Flush()(;
  1723. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  1724. ASSERT_EQ(1, NumTableFilesAtLevel(0));
  1725. }
  1726. #endif // ENABLE_SINGLE_LEVEL_DTC
  1727. TEST_F(DBTestUniversalCompaction2, MultipleLevels) {
  1728. const int kWindowSize = 100;
  1729. const int kNumDelsTrigger = 90;
  1730. Options opts = CurrentOptions();
  1731. opts.table_properties_collector_factories.emplace_back(
  1732. NewCompactOnDeletionCollectorFactory(kWindowSize, kNumDelsTrigger));
  1733. opts.compaction_style = kCompactionStyleUniversal;
  1734. opts.level0_file_num_compaction_trigger = 4;
  1735. opts.compression = kNoCompression;
  1736. opts.compaction_options_universal.size_ratio = 10;
  1737. opts.compaction_options_universal.min_merge_width = 2;
  1738. opts.compaction_options_universal.max_size_amplification_percent = 200;
  1739. Reopen(opts);
  1740. // add an L1 file to prevent tombstones from dropping due to obsolescence
  1741. // during flush
  1742. int i;
  1743. for (i = 0; i < 500; ++i) {
  1744. ASSERT_OK(Put(Key(i), "val"));
  1745. }
  1746. ASSERT_OK(Flush());
  1747. for (i = 500; i < 1000; ++i) {
  1748. ASSERT_OK(Put(Key(i), "val"));
  1749. }
  1750. ASSERT_OK(Flush());
  1751. for (i = 1000; i < 1500; ++i) {
  1752. ASSERT_OK(Put(Key(i), "val"));
  1753. }
  1754. ASSERT_OK(Flush());
  1755. for (i = 1500; i < 2000; ++i) {
  1756. ASSERT_OK(Put(Key(i), "val"));
  1757. }
  1758. ASSERT_OK(Flush());
  1759. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  1760. ASSERT_EQ(0, NumTableFilesAtLevel(0));
  1761. ASSERT_GT(NumTableFilesAtLevel(6), 0);
  1762. for (i = 1999; i < 2333; ++i) {
  1763. ASSERT_OK(Put(Key(i), "val"));
  1764. }
  1765. ASSERT_OK(Flush());
  1766. for (i = 2333; i < 2666; ++i) {
  1767. ASSERT_OK(Put(Key(i), "val"));
  1768. }
  1769. ASSERT_OK(Flush());
  1770. for (i = 2666; i < 2999; ++i) {
  1771. ASSERT_OK(Put(Key(i), "val"));
  1772. }
  1773. ASSERT_OK(Flush());
  1774. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  1775. ASSERT_EQ(0, NumTableFilesAtLevel(0));
  1776. ASSERT_GT(NumTableFilesAtLevel(6), 0);
  1777. ASSERT_GT(NumTableFilesAtLevel(5), 0);
  1778. for (i = 1900; i < 2100; ++i) {
  1779. ASSERT_OK(Delete(Key(i)));
  1780. }
  1781. ASSERT_OK(Flush());
  1782. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  1783. ASSERT_EQ(0, NumTableFilesAtLevel(0));
  1784. ASSERT_EQ(0, NumTableFilesAtLevel(1));
  1785. ASSERT_EQ(0, NumTableFilesAtLevel(2));
  1786. ASSERT_EQ(0, NumTableFilesAtLevel(3));
  1787. ASSERT_EQ(0, NumTableFilesAtLevel(4));
  1788. ASSERT_EQ(0, NumTableFilesAtLevel(5));
  1789. ASSERT_GT(NumTableFilesAtLevel(6), 0);
  1790. }
  1791. TEST_F(DBTestUniversalCompaction2, OverlappingL0) {
  1792. const int kWindowSize = 100;
  1793. const int kNumDelsTrigger = 90;
  1794. Options opts = CurrentOptions();
  1795. opts.table_properties_collector_factories.emplace_back(
  1796. NewCompactOnDeletionCollectorFactory(kWindowSize, kNumDelsTrigger));
  1797. opts.compaction_style = kCompactionStyleUniversal;
  1798. opts.level0_file_num_compaction_trigger = 5;
  1799. opts.compression = kNoCompression;
  1800. opts.compaction_options_universal.size_ratio = 10;
  1801. opts.compaction_options_universal.min_merge_width = 2;
  1802. opts.compaction_options_universal.max_size_amplification_percent = 200;
  1803. Reopen(opts);
  1804. // add an L1 file to prevent tombstones from dropping due to obsolescence
  1805. // during flush
  1806. int i;
  1807. for (i = 0; i < 2000; ++i) {
  1808. ASSERT_OK(Put(Key(i), "val"));
  1809. }
  1810. ASSERT_OK(Flush());
  1811. for (i = 2000; i < 3000; ++i) {
  1812. ASSERT_OK(Put(Key(i), "val"));
  1813. }
  1814. ASSERT_OK(Flush());
  1815. for (i = 3500; i < 4000; ++i) {
  1816. ASSERT_OK(Put(Key(i), "val"));
  1817. }
  1818. ASSERT_OK(Flush());
  1819. for (i = 2900; i < 3100; ++i) {
  1820. ASSERT_OK(Delete(Key(i)));
  1821. }
  1822. ASSERT_OK(Flush());
  1823. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  1824. ASSERT_EQ(2, NumTableFilesAtLevel(0));
  1825. ASSERT_GT(NumTableFilesAtLevel(6), 0);
  1826. }
TEST_F(DBTestUniversalCompaction2, IngestBehind) {
  // With ingest-behind enabled (via either the DB-wide or per-CF option),
  // compaction must keep the last level reserved for ingested files, so the
  // deletion-triggered compaction here lands on level 5, not 6. With the
  // per-CF option, a second CF created without it can still use level 6.
  for (bool cf_option : {false, true}) {
    SCOPED_TRACE("cf_option = " + std::to_string(cf_option));
    const int kNumKeys = 3000;
    const int kWindowSize = 100;
    const int kNumDelsTrigger = 90;

    Options opts = CurrentOptions();
    opts.table_properties_collector_factories.emplace_back(
        NewCompactOnDeletionCollectorFactory(kWindowSize, kNumDelsTrigger));
    opts.compaction_style = kCompactionStyleUniversal;
    opts.level0_file_num_compaction_trigger = 2;
    opts.compression = kNoCompression;
    if (cf_option) {
      opts.cf_allow_ingest_behind = true;
    } else {
      opts.allow_ingest_behind = true;
    }
    opts.compaction_options_universal.size_ratio = 10;
    opts.compaction_options_universal.min_merge_width = 2;
    opts.compaction_options_universal.max_size_amplification_percent = 200;
    Reopen(opts);

    // add an L1 file to prevent tombstones from dropping due to obsolescence
    // during flush
    int i;
    for (i = 0; i < 2000; ++i) {
      ASSERT_OK(Put(Key(i), "val"));
    }
    ASSERT_OK(Flush());
    // MoveFilesToLevel(6);
    ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));

    // Write the remaining keys, deleting a dense window near the end to trip
    // the compact-on-deletion collector.
    for (i = 1999; i < kNumKeys; ++i) {
      if (i >= kNumKeys - kWindowSize &&
          i < kNumKeys - kWindowSize + kNumDelsTrigger) {
        ASSERT_OK(Delete(Key(i)));
      } else {
        ASSERT_OK(Put(Key(i), "val"));
      }
    }
    ASSERT_OK(Flush());

    ASSERT_OK(dbfull()->TEST_WaitForCompact());
    ASSERT_EQ(0, NumTableFilesAtLevel(0));
    // The last level (6) must stay empty for ingest-behind; output is on 5.
    ASSERT_EQ(0, NumTableFilesAtLevel(6));
    ASSERT_GT(NumTableFilesAtLevel(5), 0);

    if (cf_option) {
      // Test that another CF does not allow ingest behind
      ColumnFamilyHandle* new_cfh;
      Options new_cf_option;
      new_cf_option.compaction_style = kCompactionStyleUniversal;
      new_cf_option.num_levels = 7;
      // CreateColumnFamilies({"new_cf"}, new_cf_option);
      ASSERT_OK(db_->CreateColumnFamily(new_cf_option, "new_cf", &new_cfh));
      // handles_.push_back(new_cfh);
      for (i = 0; i < 10; ++i) {
        // ASSERT_OK(Put(1, Key(i), "val"));
        ASSERT_OK(db_->Put(WriteOptions(), new_cfh, Key(i), "val"));
      }
      ASSERT_OK(
          db_->CompactRange(CompactRangeOptions(), new_cfh, nullptr, nullptr));
      // This CF can use the last level.
      std::string property;
      EXPECT_TRUE(db_->GetProperty(
          new_cfh, "rocksdb.num-files-at-level" + std::to_string(6),
          &property));
      ASSERT_EQ(1, atoi(property.c_str()));
      ASSERT_OK(db_->DropColumnFamily(new_cfh));
      ASSERT_OK(db_->DestroyColumnFamilyHandle(new_cfh));
    }
  }
}
  1896. TEST_F(DBTestUniversalCompaction2, PeriodicCompactionDefault) {
  1897. Options options;
  1898. options.compaction_style = kCompactionStyleUniversal;
  1899. options.env = env_;
  1900. KeepFilterFactory* filter = new KeepFilterFactory(true);
  1901. options.compaction_filter_factory.reset(filter);
  1902. Reopen(options);
  1903. ASSERT_EQ(30 * 24 * 60 * 60,
  1904. dbfull()->GetOptions().periodic_compaction_seconds);
  1905. KeepFilter df;
  1906. options.compaction_filter_factory.reset();
  1907. options.compaction_filter = &df;
  1908. Reopen(options);
  1909. ASSERT_EQ(30 * 24 * 60 * 60,
  1910. dbfull()->GetOptions().periodic_compaction_seconds);
  1911. options.ttl = 60 * 24 * 60 * 60;
  1912. options.compaction_filter = nullptr;
  1913. Reopen(options);
  1914. ASSERT_EQ(30 * 24 * 60 * 60,
  1915. dbfull()->GetOptions().periodic_compaction_seconds);
  1916. options.periodic_compaction_seconds = 45 * 24 * 60 * 60;
  1917. options.ttl = 50 * 24 * 60 * 60;
  1918. Reopen(options);
  1919. ASSERT_EQ(45 * 24 * 60 * 60,
  1920. dbfull()->GetOptions().periodic_compaction_seconds);
  1921. options.periodic_compaction_seconds = 0;
  1922. options.ttl = 50 * 24 * 60 * 60;
  1923. Reopen(options);
  1924. ASSERT_EQ(50 * 24 * 60 * 60,
  1925. dbfull()->GetOptions().periodic_compaction_seconds);
  1926. }
  1927. TEST_F(DBTestUniversalCompaction2, PeriodicCompaction) {
  1928. Options opts = CurrentOptions();
  1929. opts.env = env_;
  1930. opts.compaction_style = kCompactionStyleUniversal;
  1931. opts.level0_file_num_compaction_trigger = 10;
  1932. opts.max_open_files = -1;
  1933. opts.compaction_options_universal.size_ratio = 10;
  1934. opts.compaction_options_universal.min_merge_width = 2;
  1935. opts.compaction_options_universal.max_size_amplification_percent = 200;
  1936. opts.periodic_compaction_seconds = 48 * 60 * 60; // 2 days
  1937. opts.num_levels = 5;
  1938. env_->SetMockSleep();
  1939. Reopen(opts);
  1940. // NOTE: Presumed unnecessary and removed: resetting mock time in env
  1941. int periodic_compactions = 0;
  1942. int start_level = -1;
  1943. int output_level = -1;
  1944. ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
  1945. "UniversalCompactionPicker::PickPeriodicCompaction:Return",
  1946. [&](void* arg) {
  1947. Compaction* compaction = static_cast<Compaction*>(arg);
  1948. ASSERT_TRUE(arg != nullptr);
  1949. ASSERT_TRUE(compaction->compaction_reason() ==
  1950. CompactionReason::kPeriodicCompaction);
  1951. start_level = compaction->start_level();
  1952. output_level = compaction->output_level();
  1953. periodic_compactions++;
  1954. });
  1955. ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
  1956. // Case 1: Oldest flushed file excceeds periodic compaction threshold.
  1957. ASSERT_OK(Put("foo", "bar"));
  1958. ASSERT_OK(Flush());
  1959. ASSERT_EQ(0, periodic_compactions);
  1960. // Move clock forward so that the flushed file would qualify periodic
  1961. // compaction.
  1962. env_->MockSleepForSeconds(48 * 60 * 60 + 100);
  1963. // Another flush would trigger compaction the oldest file.
  1964. ASSERT_OK(Put("foo", "bar2"));
  1965. ASSERT_OK(Flush());
  1966. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  1967. ASSERT_EQ(1, periodic_compactions);
  1968. ASSERT_EQ(0, start_level);
  1969. ASSERT_EQ(4, output_level);
  1970. // Case 2: Oldest compacted file excceeds periodic compaction threshold
  1971. periodic_compactions = 0;
  1972. // A flush doesn't trigger a periodic compaction when threshold not hit
  1973. ASSERT_OK(Put("foo", "bar2"));
  1974. ASSERT_OK(Flush());
  1975. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  1976. ASSERT_EQ(0, periodic_compactions);
  1977. // After periodic compaction threshold hits, a flush will trigger
  1978. // a compaction
  1979. ASSERT_OK(Put("foo", "bar2"));
  1980. env_->MockSleepForSeconds(48 * 60 * 60 + 100);
  1981. ASSERT_OK(Flush());
  1982. ASSERT_OK(dbfull()->TEST_WaitForCompact());
  1983. ASSERT_EQ(1, periodic_compactions);
  1984. ASSERT_EQ(0, start_level);
  1985. ASSERT_EQ(4, output_level);
  1986. }
TEST_F(DBTestUniversalCompaction2, PeriodicCompactionOffpeak) {
  // Verifies the interaction between periodic compaction and the
  // daily_offpeak_time_utc option: a file that is close to (but not past)
  // periodic_compaction_seconds old may be picked up early during the
  // offpeak window, and in every scenario it must eventually be compacted
  // once its age exceeds the threshold. Uses a mock clock pinned to various
  // times of day.
  constexpr int kSecondsPerDay = 86400;
  constexpr int kSecondsPerHour = 3600;
  constexpr int kSecondsPerMinute = 60;

  Options opts = CurrentOptions();
  opts.compaction_style = kCompactionStyleUniversal;
  opts.level0_file_num_compaction_trigger = 10;
  opts.max_open_files = -1;
  opts.compaction_options_universal.size_ratio = 10;
  opts.compaction_options_universal.min_merge_width = 2;
  opts.compaction_options_universal.max_size_amplification_percent = 200;
  opts.periodic_compaction_seconds = 5 * kSecondsPerDay;  // 5 days
  opts.num_levels = 5;

  // Just to add some extra random days to current time
  Random rnd(test::RandomSeed());
  int days = rnd.Uniform(100);

  int periodic_compactions = 0;
  int start_level = -1;
  int output_level = -1;
  // Record the details of each periodic compaction the picker chooses so the
  // assertions below can check when (and how) it happened.
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "UniversalCompactionPicker::PickPeriodicCompaction:Return",
      [&](void* arg) {
        Compaction* compaction = static_cast<Compaction*>(arg);
        ASSERT_TRUE(arg != nullptr);
        ASSERT_TRUE(compaction->compaction_reason() ==
                    CompactionReason::kPeriodicCompaction);
        start_level = compaction->start_level();
        output_level = compaction->output_level();
        periodic_compactions++;
      });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();

  // Exercise each combination of (offpeak window set at open, offpeak window
  // applied later via SetDBOptions) at several wall-clock times of day.
  for (std::string preset_offpeak_time : {"", "00:30-04:30", "10:30-02:30"}) {
    SCOPED_TRACE("preset_offpeak_time=" + preset_offpeak_time);
    for (std::string new_offpeak_time : {"", "23:30-02:30"}) {
      SCOPED_TRACE("new_offpeak_time=" + new_offpeak_time);
      std::vector<std::pair<int, int>> times_to_test = {
          {0, 0}, {2, 30}, {3, 15}, {5, 10}, {13, 30}, {23, 30}};
      for (std::pair<int, int> now : times_to_test) {
        int now_hour = now.first;
        int now_minute = now.second;
        SCOPED_TRACE("now=" + std::to_string(now_hour) + ":" +
                     std::to_string(now_minute));
        // Fresh DB per scenario, with a mock clock pinned to the chosen
        // time of day (plus a random number of whole days).
        auto mock_clock =
            std::make_shared<MockSystemClock>(env_->GetSystemClock());
        auto mock_env = std::make_unique<CompositeEnvWrapper>(env_, mock_clock);
        opts.env = mock_env.get();
        mock_clock->SetCurrentTime(days * kSecondsPerDay +
                                   now_hour * kSecondsPerHour +
                                   now_minute * kSecondsPerMinute);
        opts.daily_offpeak_time_utc = preset_offpeak_time;
        Reopen(opts);

        ASSERT_OK(Put("foo", "bar1"));
        ASSERT_OK(Flush());
        ASSERT_EQ(0, periodic_compactions);
        // Move clock forward by 8 hours. There should be no periodic
        // compaction, yet.
        mock_clock->MockSleepForSeconds(8 * kSecondsPerHour);
        ASSERT_OK(Put("foo", "bar2"));
        ASSERT_OK(Flush());
        ASSERT_OK(dbfull()->TEST_WaitForCompact());
        ASSERT_EQ(0, periodic_compactions);

        // Move clock forward by 4 days
        mock_clock->MockSleepForSeconds(4 * kSecondsPerDay);
        ASSERT_OK(Put("foo", "bar3"));
        ASSERT_OK(Flush());
        ASSERT_OK(dbfull()->TEST_WaitForCompact());
        int64_t mock_now;
        ASSERT_OK(mock_clock->GetCurrentTime(&mock_now));

        auto offpeak_time_info =
            dbfull()->GetVersionSet()->offpeak_time_option().GetOffpeakTimeInfo(
                mock_now);
        // At this point, the first file is 4 days and 8 hours old.
        // If it's offpeak now and the file is expected to expire before the
        // next offpeak starts
        if (offpeak_time_info.is_now_offpeak &&
            offpeak_time_info.seconds_till_next_offpeak_start /
                    kSecondsPerHour >
                16) {
          // Early pickup: the file was compacted during the offpeak window.
          ASSERT_EQ(1, periodic_compactions);
        } else {
          ASSERT_EQ(0, periodic_compactions);
          // Change offpeak option by SetDBOption()
          if (preset_offpeak_time != new_offpeak_time) {
            ASSERT_OK(dbfull()->SetDBOptions(
                {{"daily_offpeak_time_utc", new_offpeak_time}}));
            ASSERT_OK(Put("foo", "bar4"));
            ASSERT_OK(Flush());
            ASSERT_OK(dbfull()->TEST_WaitForCompact());
            // Re-evaluate the offpeak window under the updated option.
            offpeak_time_info = dbfull()
                                    ->GetVersionSet()
                                    ->offpeak_time_option()
                                    .GetOffpeakTimeInfo(mock_now);
            // if the first file is now eligible to be picked up
            if (offpeak_time_info.is_now_offpeak &&
                offpeak_time_info.seconds_till_next_offpeak_start /
                        kSecondsPerHour >
                    16) {
              ASSERT_OK(Put("foo", "bar5"));
              ASSERT_OK(Flush());
              ASSERT_OK(dbfull()->TEST_WaitForCompact());
              ASSERT_EQ(1, periodic_compactions);
            }
          }

          // If the file has not been picked up yet (no offpeak set, or offpeak
          // set but then unset before the file becomes eligible)
          if (periodic_compactions == 0) {
            // move clock forward by one more day
            // (the first file is now past the 5-day threshold)
            mock_clock->MockSleepForSeconds(1 * kSecondsPerDay);
            ASSERT_OK(Put("foo", "bar6"));
            ASSERT_OK(Flush());
            ASSERT_OK(dbfull()->TEST_WaitForCompact());
          }
        }
        // In every scenario exactly one periodic compaction must have run,
        // from L0 to the bottommost level (L4 with num_levels = 5).
        ASSERT_EQ(1, periodic_compactions);
        ASSERT_EQ(0, start_level);
        ASSERT_EQ(4, output_level);
        Destroy(opts);
        periodic_compactions = 0;
      }
    }
  }
}
  2109. } // namespace ROCKSDB_NAMESPACE
int main(int argc, char** argv) {
  // Install the stack trace handler first so that any test crash produces a
  // usable backtrace, then hand control to the gtest runner.
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}