db_test_util.h

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once

#include <fcntl.h>

#include <algorithm>
#include <cinttypes>
#include <map>
#include <set>
#include <string>
#include <thread>
#include <unordered_set>
#include <utility>
#include <vector>

#include "db/db_impl/db_impl.h"
#include "db/dbformat.h"
#include "env/mock_env.h"
#include "file/filename.h"
#include "memtable/hash_linklist_rep.h"
#include "rocksdb/cache.h"
#include "rocksdb/compaction_filter.h"
#include "rocksdb/convenience.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/slice.h"
#include "rocksdb/sst_file_writer.h"
#include "rocksdb/statistics.h"
#include "rocksdb/table.h"
#include "rocksdb/utilities/checkpoint.h"
#include "table/block_based/block_based_table_factory.h"
#include "table/mock_table.h"
#include "table/plain/plain_table_factory.h"
#include "table/scoped_arena_iterator.h"
#include "test_util/mock_time_env.h"
#include "test_util/sync_point.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/compression.h"
#include "util/mutexlock.h"
#include "util/string_util.h"
#include "utilities/merge_operators.h"
namespace ROCKSDB_NAMESPACE {

namespace anon {

class AtomicCounter {
 public:
  explicit AtomicCounter(Env* env = NULL)
      : env_(env), cond_count_(&mu_), count_(0) {}
  void Increment() {
    MutexLock l(&mu_);
    count_++;
    cond_count_.SignalAll();
  }
  int Read() {
    MutexLock l(&mu_);
    return count_;
  }
  bool WaitFor(int count) {
    MutexLock l(&mu_);
    uint64_t start = env_->NowMicros();
    while (count_ < count) {
      uint64_t now = env_->NowMicros();
      cond_count_.TimedWait(now + /*1s*/ 1 * 1000 * 1000);
      if (env_->NowMicros() - start > /*10s*/ 10 * 1000 * 1000) {
        return false;
      }
      if (count_ < count) {
        GTEST_LOG_(WARNING) << "WaitFor is taking more time than usual";
      }
    }
    return true;
  }
  void Reset() {
    MutexLock l(&mu_);
    count_ = 0;
    cond_count_.SignalAll();
  }

 private:
  Env* env_;
  port::Mutex mu_;
  port::CondVar cond_count_;
  int count_;
};
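// Illustrative sketch (not part of the original header): a test might use
// AtomicCounter to block until a background callback has fired a given
// number of times. The sync-point name below is assumed for illustration.
//
//   anon::AtomicCounter flush_counter(Env::Default());
//   SyncPoint::GetInstance()->SetCallBack(
//       "FlushJob::Start", [&](void*) { flush_counter.Increment(); });
//   SyncPoint::GetInstance()->EnableProcessing();
//   // ... issue writes that should trigger a flush ...
//   ASSERT_TRUE(flush_counter.WaitFor(1));  // waits up to ~10 seconds
//   SyncPoint::GetInstance()->DisableProcessing();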
struct OptionsOverride {
  std::shared_ptr<const FilterPolicy> filter_policy = nullptr;
  // These will be used only if filter_policy is set
  bool partition_filters = false;
  uint64_t metadata_block_size = 1024;

  // Used as a bit mask of individual enums in which to skip an XF test point
  int skip_policy = 0;
};

}  // namespace anon

enum SkipPolicy { kSkipNone = 0, kSkipNoSnapshot = 1, kSkipNoPrefix = 2 };
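// Illustrative sketch (not part of the original header): OptionsOverride is
// passed to DBTestBase::CurrentOptions() (declared further below) to layer a
// filter policy on top of whatever table configuration the current option
// config selects. The parameter values here are arbitrary.
//
//   anon::OptionsOverride options_override;
//   options_override.filter_policy.reset(NewBloomFilterPolicy(10));
//   options_override.partition_filters = true;
//   options_override.metadata_block_size = 4096;
//   Options options = CurrentOptions(options_override);
//   DestroyAndReopen(options);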
// A hacky skip list mem table that triggers flush after number of entries.
class SpecialMemTableRep : public MemTableRep {
 public:
  explicit SpecialMemTableRep(Allocator* allocator, MemTableRep* memtable,
                              int num_entries_flush)
      : MemTableRep(allocator),
        memtable_(memtable),
        num_entries_flush_(num_entries_flush),
        num_entries_(0) {}

  virtual KeyHandle Allocate(const size_t len, char** buf) override {
    return memtable_->Allocate(len, buf);
  }

  // Insert key into the list.
  // REQUIRES: nothing that compares equal to key is currently in the list.
  virtual void Insert(KeyHandle handle) override {
    num_entries_++;
    memtable_->Insert(handle);
  }

  void InsertConcurrently(KeyHandle handle) override {
    num_entries_++;
    memtable_->Insert(handle);
  }

  // Returns true iff an entry that compares equal to key is in the list.
  virtual bool Contains(const char* key) const override {
    return memtable_->Contains(key);
  }

  virtual size_t ApproximateMemoryUsage() override {
    // Return a high memory usage when number of entries exceeds the threshold
    // to trigger a flush.
    return (num_entries_ < num_entries_flush_) ? 0 : 1024 * 1024 * 1024;
  }

  virtual void Get(const LookupKey& k, void* callback_args,
                   bool (*callback_func)(void* arg,
                                         const char* entry)) override {
    memtable_->Get(k, callback_args, callback_func);
  }

  uint64_t ApproximateNumEntries(const Slice& start_ikey,
                                 const Slice& end_ikey) override {
    return memtable_->ApproximateNumEntries(start_ikey, end_ikey);
  }

  virtual MemTableRep::Iterator* GetIterator(Arena* arena = nullptr) override {
    return memtable_->GetIterator(arena);
  }

  virtual ~SpecialMemTableRep() override {}

 private:
  std::unique_ptr<MemTableRep> memtable_;
  int num_entries_flush_;
  int num_entries_;
};

// The factory for the hacky skip list mem table that triggers flush after
// number of entries exceeds a threshold.
class SpecialSkipListFactory : public MemTableRepFactory {
 public:
  // After number of inserts exceeds `num_entries_flush` in a mem table,
  // trigger flush.
  explicit SpecialSkipListFactory(int num_entries_flush)
      : num_entries_flush_(num_entries_flush) {}

  using MemTableRepFactory::CreateMemTableRep;
  virtual MemTableRep* CreateMemTableRep(
      const MemTableRep::KeyComparator& compare, Allocator* allocator,
      const SliceTransform* transform, Logger* /*logger*/) override {
    return new SpecialMemTableRep(
        allocator,
        factory_.CreateMemTableRep(compare, allocator, transform, 0),
        num_entries_flush_);
  }
  virtual const char* Name() const override { return "SkipListFactory"; }

  bool IsInsertConcurrentlySupported() const override {
    return factory_.IsInsertConcurrentlySupported();
  }

 private:
  SkipListFactory factory_;
  int num_entries_flush_;
};
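// Illustrative sketch (not part of the original header): a test can force a
// flush after a fixed number of writes by plugging SpecialSkipListFactory
// into the options (DBTestBase helpers such as CurrentOptions/Reopen/Put are
// declared further below). The threshold and keys here are arbitrary.
//
//   Options options = CurrentOptions();
//   options.memtable_factory.reset(new SpecialSkipListFactory(3));
//   Reopen(options);
//   ASSERT_OK(Put("k1", "v1"));
//   ASSERT_OK(Put("k2", "v2"));
//   ASSERT_OK(Put("k3", "v3"));  // memtable now reports huge memory usage
//   ASSERT_OK(Put("k4", "v4"));  // the next write triggers an automatic flush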
// Special Env used to delay background operations
class SpecialEnv : public EnvWrapper {
 public:
  explicit SpecialEnv(Env* base);

  Status NewWritableFile(const std::string& f, std::unique_ptr<WritableFile>* r,
                         const EnvOptions& soptions) override {
    class SSTableFile : public WritableFile {
     private:
      SpecialEnv* env_;
      std::unique_ptr<WritableFile> base_;

     public:
      SSTableFile(SpecialEnv* env, std::unique_ptr<WritableFile>&& base)
          : env_(env), base_(std::move(base)) {}
      Status Append(const Slice& data) override {
        if (env_->table_write_callback_) {
          (*env_->table_write_callback_)();
        }
        if (env_->drop_writes_.load(std::memory_order_acquire)) {
          // Drop writes on the floor
          return Status::OK();
        } else if (env_->no_space_.load(std::memory_order_acquire)) {
          return Status::NoSpace("No space left on device");
        } else {
          env_->bytes_written_ += data.size();
          return base_->Append(data);
        }
      }
      Status PositionedAppend(const Slice& data, uint64_t offset) override {
        if (env_->table_write_callback_) {
          (*env_->table_write_callback_)();
        }
        if (env_->drop_writes_.load(std::memory_order_acquire)) {
          // Drop writes on the floor
          return Status::OK();
        } else if (env_->no_space_.load(std::memory_order_acquire)) {
          return Status::NoSpace("No space left on device");
        } else {
          env_->bytes_written_ += data.size();
          return base_->PositionedAppend(data, offset);
        }
      }
      Status Truncate(uint64_t size) override { return base_->Truncate(size); }
      Status RangeSync(uint64_t offset, uint64_t nbytes) override {
        Status s = base_->RangeSync(offset, nbytes);
#if !(defined NDEBUG) || !defined(OS_WIN)
        TEST_SYNC_POINT_CALLBACK("SpecialEnv::SStableFile::RangeSync", &s);
#endif  // !(defined NDEBUG) || !defined(OS_WIN)
        return s;
      }
      Status Close() override {
        // SyncPoint is not supported in Released Windows Mode.
#if !(defined NDEBUG) || !defined(OS_WIN)
        // Check preallocation size
        // preallocation size is never passed to base file.
        size_t preallocation_size = preallocation_block_size();
        TEST_SYNC_POINT_CALLBACK("DBTestWritableFile.GetPreallocationStatus",
                                 &preallocation_size);
#endif  // !(defined NDEBUG) || !defined(OS_WIN)
        Status s = base_->Close();
#if !(defined NDEBUG) || !defined(OS_WIN)
        TEST_SYNC_POINT_CALLBACK("SpecialEnv::SStableFile::Close", &s);
#endif  // !(defined NDEBUG) || !defined(OS_WIN)
        return s;
      }
      Status Flush() override { return base_->Flush(); }
      Status Sync() override {
        ++env_->sync_counter_;
        while (env_->delay_sstable_sync_.load(std::memory_order_acquire)) {
          env_->SleepForMicroseconds(100000);
        }
        Status s = base_->Sync();
#if !(defined NDEBUG) || !defined(OS_WIN)
        TEST_SYNC_POINT_CALLBACK("SpecialEnv::SStableFile::Sync", &s);
#endif  // !(defined NDEBUG) || !defined(OS_WIN)
        return s;
      }
      void SetIOPriority(Env::IOPriority pri) override {
        base_->SetIOPriority(pri);
      }
      Env::IOPriority GetIOPriority() override {
        return base_->GetIOPriority();
      }
      bool use_direct_io() const override { return base_->use_direct_io(); }
      Status Allocate(uint64_t offset, uint64_t len) override {
        return base_->Allocate(offset, len);
      }
    };
    class ManifestFile : public WritableFile {
     public:
      ManifestFile(SpecialEnv* env, std::unique_ptr<WritableFile>&& b)
          : env_(env), base_(std::move(b)) {}
      Status Append(const Slice& data) override {
        if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
          return Status::IOError("simulated writer error");
        } else {
          return base_->Append(data);
        }
      }
      Status Truncate(uint64_t size) override { return base_->Truncate(size); }
      Status Close() override { return base_->Close(); }
      Status Flush() override { return base_->Flush(); }
      Status Sync() override {
        ++env_->sync_counter_;
        if (env_->manifest_sync_error_.load(std::memory_order_acquire)) {
          return Status::IOError("simulated sync error");
        } else {
          return base_->Sync();
        }
      }
      uint64_t GetFileSize() override { return base_->GetFileSize(); }
      Status Allocate(uint64_t offset, uint64_t len) override {
        return base_->Allocate(offset, len);
      }

     private:
      SpecialEnv* env_;
      std::unique_ptr<WritableFile> base_;
    };
    class WalFile : public WritableFile {
     public:
      WalFile(SpecialEnv* env, std::unique_ptr<WritableFile>&& b)
          : env_(env), base_(std::move(b)) {
        env_->num_open_wal_file_.fetch_add(1);
      }
      virtual ~WalFile() { env_->num_open_wal_file_.fetch_add(-1); }
      Status Append(const Slice& data) override {
#if !(defined NDEBUG) || !defined(OS_WIN)
        TEST_SYNC_POINT("SpecialEnv::WalFile::Append:1");
#endif
        Status s;
        if (env_->log_write_error_.load(std::memory_order_acquire)) {
          s = Status::IOError("simulated writer error");
        } else {
          int slowdown =
              env_->log_write_slowdown_.load(std::memory_order_acquire);
          if (slowdown > 0) {
            env_->SleepForMicroseconds(slowdown);
          }
          s = base_->Append(data);
        }
#if !(defined NDEBUG) || !defined(OS_WIN)
        TEST_SYNC_POINT("SpecialEnv::WalFile::Append:2");
#endif
        return s;
      }
      Status Truncate(uint64_t size) override { return base_->Truncate(size); }
      Status Close() override {
        // SyncPoint is not supported in Released Windows Mode.
#if !(defined NDEBUG) || !defined(OS_WIN)
        // Check preallocation size
        // preallocation size is never passed to base file.
        size_t preallocation_size = preallocation_block_size();
        TEST_SYNC_POINT_CALLBACK("DBTestWalFile.GetPreallocationStatus",
                                 &preallocation_size);
#endif  // !(defined NDEBUG) || !defined(OS_WIN)
        return base_->Close();
      }
      Status Flush() override { return base_->Flush(); }
      Status Sync() override {
        ++env_->sync_counter_;
        return base_->Sync();
      }
      bool IsSyncThreadSafe() const override {
        return env_->is_wal_sync_thread_safe_.load();
      }
      Status Allocate(uint64_t offset, uint64_t len) override {
        return base_->Allocate(offset, len);
      }

     private:
      SpecialEnv* env_;
      std::unique_ptr<WritableFile> base_;
    };

    if (non_writeable_rate_.load(std::memory_order_acquire) > 0) {
      uint32_t random_number;
      {
        MutexLock l(&rnd_mutex_);
        random_number = rnd_.Uniform(100);
      }
      if (random_number < non_writeable_rate_.load()) {
        return Status::IOError("simulated random write error");
      }
    }

    new_writable_count_++;

    if (non_writable_count_.load() > 0) {
      non_writable_count_--;
      return Status::IOError("simulated write error");
    }

    EnvOptions optimized = soptions;
    if (strstr(f.c_str(), "MANIFEST") != nullptr ||
        strstr(f.c_str(), "log") != nullptr) {
      optimized.use_mmap_writes = false;
      optimized.use_direct_writes = false;
    }

    Status s = target()->NewWritableFile(f, r, optimized);
    if (s.ok()) {
      if (strstr(f.c_str(), ".sst") != nullptr) {
        r->reset(new SSTableFile(this, std::move(*r)));
      } else if (strstr(f.c_str(), "MANIFEST") != nullptr) {
        r->reset(new ManifestFile(this, std::move(*r)));
      } else if (strstr(f.c_str(), "log") != nullptr) {
        r->reset(new WalFile(this, std::move(*r)));
      }
    }
    return s;
  }

  Status NewRandomAccessFile(const std::string& f,
                             std::unique_ptr<RandomAccessFile>* r,
                             const EnvOptions& soptions) override {
    class CountingFile : public RandomAccessFile {
     public:
      CountingFile(std::unique_ptr<RandomAccessFile>&& target,
                   anon::AtomicCounter* counter,
                   std::atomic<size_t>* bytes_read)
          : target_(std::move(target)),
            counter_(counter),
            bytes_read_(bytes_read) {}
      virtual Status Read(uint64_t offset, size_t n, Slice* result,
                          char* scratch) const override {
        counter_->Increment();
        Status s = target_->Read(offset, n, result, scratch);
        *bytes_read_ += result->size();
        return s;
      }
      virtual Status Prefetch(uint64_t offset, size_t n) override {
        Status s = target_->Prefetch(offset, n);
        *bytes_read_ += n;
        return s;
      }

     private:
      std::unique_ptr<RandomAccessFile> target_;
      anon::AtomicCounter* counter_;
      std::atomic<size_t>* bytes_read_;
    };

    Status s = target()->NewRandomAccessFile(f, r, soptions);
    random_file_open_counter_++;
    if (s.ok() && count_random_reads_) {
      r->reset(new CountingFile(std::move(*r), &random_read_counter_,
                                &random_read_bytes_counter_));
    }
    if (s.ok() && soptions.compaction_readahead_size > 0) {
      compaction_readahead_size_ = soptions.compaction_readahead_size;
    }
    return s;
  }

  virtual Status NewSequentialFile(const std::string& f,
                                   std::unique_ptr<SequentialFile>* r,
                                   const EnvOptions& soptions) override {
    class CountingFile : public SequentialFile {
     public:
      CountingFile(std::unique_ptr<SequentialFile>&& target,
                   anon::AtomicCounter* counter)
          : target_(std::move(target)), counter_(counter) {}
      virtual Status Read(size_t n, Slice* result, char* scratch) override {
        counter_->Increment();
        return target_->Read(n, result, scratch);
      }
      virtual Status Skip(uint64_t n) override { return target_->Skip(n); }

     private:
      std::unique_ptr<SequentialFile> target_;
      anon::AtomicCounter* counter_;
    };

    Status s = target()->NewSequentialFile(f, r, soptions);
    if (s.ok() && count_sequential_reads_) {
      r->reset(new CountingFile(std::move(*r), &sequential_read_counter_));
    }
    return s;
  }

  virtual void SleepForMicroseconds(int micros) override {
    sleep_counter_.Increment();
    if (no_slowdown_ || time_elapse_only_sleep_) {
      addon_time_.fetch_add(micros);
    }
    if (!no_slowdown_) {
      target()->SleepForMicroseconds(micros);
    }
  }

  virtual Status GetCurrentTime(int64_t* unix_time) override {
    Status s;
    if (!time_elapse_only_sleep_) {
      s = target()->GetCurrentTime(unix_time);
    }
    if (s.ok()) {
      *unix_time += addon_time_.load();
    }
    return s;
  }

  virtual uint64_t NowCPUNanos() override {
    now_cpu_count_.fetch_add(1);
    return target()->NowCPUNanos();
  }

  virtual uint64_t NowNanos() override {
    return (time_elapse_only_sleep_ ? 0 : target()->NowNanos()) +
           addon_time_.load() * 1000;
  }

  virtual uint64_t NowMicros() override {
    return (time_elapse_only_sleep_ ? 0 : target()->NowMicros()) +
           addon_time_.load();
  }

  virtual Status DeleteFile(const std::string& fname) override {
    delete_count_.fetch_add(1);
    return target()->DeleteFile(fname);
  }

  Random rnd_;
  port::Mutex rnd_mutex_;  // Lock to protect rnd_
  // sstable Sync() calls are blocked while this is true.
  std::atomic<bool> delay_sstable_sync_;
  // Drop writes on the floor while this is true.
  std::atomic<bool> drop_writes_;
  // Simulate no-space errors while this is true.
  std::atomic<bool> no_space_;
  // Simulate a non-writable file system while this is true.
  std::atomic<bool> non_writable_;
  // Force sync of manifest files to fail while this is true.
  std::atomic<bool> manifest_sync_error_;
  // Force writes to manifest files to fail while this is true.
  std::atomic<bool> manifest_write_error_;
  // Force writes to log files to fail while this is true.
  std::atomic<bool> log_write_error_;
  // Slow down every log write, in micro-seconds.
  std::atomic<int> log_write_slowdown_;
  // Number of WAL files that are still open for write.
  std::atomic<int> num_open_wal_file_;
  bool count_random_reads_;
  anon::AtomicCounter random_read_counter_;
  std::atomic<size_t> random_read_bytes_counter_;
  std::atomic<int> random_file_open_counter_;
  bool count_sequential_reads_;
  anon::AtomicCounter sequential_read_counter_;
  anon::AtomicCounter sleep_counter_;
  std::atomic<int64_t> bytes_written_;
  std::atomic<int> sync_counter_;
  std::atomic<uint32_t> non_writeable_rate_;
  std::atomic<uint32_t> new_writable_count_;
  std::atomic<uint32_t> non_writable_count_;
  std::function<void()>* table_write_callback_;
  std::atomic<int64_t> addon_time_;
  std::atomic<int> now_cpu_count_;
  std::atomic<int> delete_count_;
  std::atomic<bool> time_elapse_only_sleep_;
  bool no_slowdown_;
  std::atomic<bool> is_wal_sync_thread_safe_{true};
  std::atomic<size_t> compaction_readahead_size_{};
};
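// Illustrative sketch (not part of the original header): tests inject faults
// by toggling the atomic flags above on the SpecialEnv held by DBTestBase
// (env_, declared further below). The exact sequence here is an assumption,
// not a specific existing test.
//
//   Options options = CurrentOptions();
//   options.env = env_;
//   Reopen(options);
//   env_->no_space_.store(true, std::memory_order_release);
//   ASSERT_OK(Put("key", "value"));  // the write only reaches the memtable
//   ASSERT_NOK(Flush());             // SST Append() reports the injected NoSpace
//   env_->no_space_.store(false, std::memory_order_release);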
#ifndef ROCKSDB_LITE
class OnFileDeletionListener : public EventListener {
 public:
  OnFileDeletionListener() : matched_count_(0), expected_file_name_("") {}

  void SetExpectedFileName(const std::string file_name) {
    expected_file_name_ = file_name;
  }

  void VerifyMatchedCount(size_t expected_value) {
    ASSERT_EQ(matched_count_, expected_value);
  }

  void OnTableFileDeleted(const TableFileDeletionInfo& info) override {
    if (expected_file_name_ != "") {
      ASSERT_EQ(expected_file_name_, info.file_path);
      expected_file_name_ = "";
      matched_count_++;
    }
  }

 private:
  size_t matched_count_;
  std::string expected_file_name_;
};
#endif
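// Illustrative sketch (not part of the original header): a test can register
// the listener before opening the DB and later assert that a specific SST
// file was deleted. The file number below is a placeholder; dbname_ is a
// DBTestBase member declared further below.
//
//   auto listener = std::make_shared<OnFileDeletionListener>();
//   Options options = CurrentOptions();
//   options.listeners.push_back(listener);
//   Reopen(options);
//   listener->SetExpectedFileName(dbname_ + "/000010.sst");  // hypothetical
//   // ... trigger a compaction or file deletion that removes that table ...
//   listener->VerifyMatchedCount(1);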
// A test merge operator that mimics Put but fails if any of the merge
// operands is "corrupted".
class TestPutOperator : public MergeOperator {
 public:
  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
                           MergeOperationOutput* merge_out) const override {
    if (merge_in.existing_value != nullptr &&
        *(merge_in.existing_value) == "corrupted") {
      return false;
    }
    for (auto value : merge_in.operand_list) {
      if (value == "corrupted") {
        return false;
      }
    }
    merge_out->existing_operand = merge_in.operand_list.back();
    return true;
  }

  virtual const char* Name() const override { return "TestPutOperator"; }
};
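// Illustrative sketch (not part of the original header): tests typically
// install this operator via Options and then issue Merge() calls that behave
// like Put(); a "corrupted" operand makes the merge fail. Keys and values are
// placeholders, and Merge/Get are DBTestBase helpers declared below.
//
//   Options options = CurrentOptions();
//   options.merge_operator = std::make_shared<TestPutOperator>();
//   Reopen(options);
//   ASSERT_OK(Merge("k", "v1"));
//   ASSERT_OK(Merge("k", "v2"));
//   ASSERT_EQ("v2", Get("k"));  // the last operand wins, mimicking Put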
class DBTestBase : public testing::Test {
 public:
  // Sequence of option configurations to try
  enum OptionConfig : int {
    kDefault = 0,
    kBlockBasedTableWithPrefixHashIndex = 1,
    kBlockBasedTableWithWholeKeyHashIndex = 2,
    kPlainTableFirstBytePrefix = 3,
    kPlainTableCappedPrefix = 4,
    kPlainTableCappedPrefixNonMmap = 5,
    kPlainTableAllBytesPrefix = 6,
    kVectorRep = 7,
    kHashLinkList = 8,
    kMergePut = 9,
    kFilter = 10,
    kFullFilterWithNewTableReaderForCompactions = 11,
    kUncompressed = 12,
    kNumLevel_3 = 13,
    kDBLogDir = 14,
    kWalDirAndMmapReads = 15,
    kManifestFileSize = 16,
    kPerfOptions = 17,
    kHashSkipList = 18,
    kUniversalCompaction = 19,
    kUniversalCompactionMultiLevel = 20,
    kCompressedBlockCache = 21,
    kInfiniteMaxOpenFiles = 22,
    kxxHashChecksum = 23,
    kFIFOCompaction = 24,
    kOptimizeFiltersForHits = 25,
    kRowCache = 26,
    kRecycleLogFiles = 27,
    kConcurrentSkipList = 28,
    kPipelinedWrite = 29,
    kConcurrentWALWrites = 30,
    kDirectIO,
    kLevelSubcompactions,
    kBlockBasedTableWithIndexRestartInterval,
    kBlockBasedTableWithPartitionedIndex,
    kBlockBasedTableWithPartitionedIndexFormat4,
    kPartitionedFilterWithNewTableReaderForCompactions,
    kUniversalSubcompactions,
    kxxHash64Checksum,
    kUnorderedWrite,
    // This must be the last line
    kEnd,
  };
 public:
  std::string dbname_;
  std::string alternative_wal_dir_;
  std::string alternative_db_log_dir_;
  MockEnv* mem_env_;
  Env* encrypted_env_;
  SpecialEnv* env_;
  std::shared_ptr<Env> env_guard_;
  DB* db_;
  std::vector<ColumnFamilyHandle*> handles_;
  int option_config_;
  Options last_options_;

  // Skip some options, as they may not be applicable to a specific test.
  // To add more skip constants, use values 4, 8, 16, etc.
  enum OptionSkip {
    kNoSkip = 0,
    kSkipDeletesFilterFirst = 1,
    kSkipUniversalCompaction = 2,
    kSkipMergePut = 4,
    kSkipPlainTable = 8,
    kSkipHashIndex = 16,
    kSkipNoSeekToLast = 32,
    kSkipFIFOCompaction = 128,
    kSkipMmapReads = 256,
  };

  const int kRangeDelSkipConfigs =
      // Plain tables do not support range deletions.
      kSkipPlainTable |
      // MmapReads disables the iterator pinning that RangeDelAggregator
      // requires.
      kSkipMmapReads;

  explicit DBTestBase(const std::string path);
  ~DBTestBase();

  static std::string RandomString(Random* rnd, int len) {
    std::string r;
    test::RandomString(rnd, len, &r);
    return r;
  }

  static std::string Key(int i) {
    char buf[100];
    snprintf(buf, sizeof(buf), "key%06d", i);
    return std::string(buf);
  }

  static bool ShouldSkipOptions(int option_config, int skip_mask = kNoSkip);

  // Switch to a fresh database with the next option configuration to
  // test. Return false if there are no more configurations to test.
  bool ChangeOptions(int skip_mask = kNoSkip);

  // Switch between different compaction styles.
  bool ChangeCompactOptions();
  // Switch between different WAL-related options.
  bool ChangeWalOptions();

  // Switch between different filter policy
  // Jump from kDefault to kFilter to kFullFilter
  bool ChangeFilterOptions();

  // Switch between different DB options for file ingestion tests.
  bool ChangeOptionsForFileIngestionTest();

  // Return the current option configuration.
  Options CurrentOptions(const anon::OptionsOverride& options_override =
                             anon::OptionsOverride()) const;
  Options CurrentOptions(const Options& default_options,
                         const anon::OptionsOverride& options_override =
                             anon::OptionsOverride()) const;
  static Options GetDefaultOptions();
  Options GetOptions(int option_config,
                     const Options& default_options = GetDefaultOptions(),
                     const anon::OptionsOverride& options_override =
                         anon::OptionsOverride()) const;

  DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }

  void CreateColumnFamilies(const std::vector<std::string>& cfs,
                            const Options& options);
  void CreateAndReopenWithCF(const std::vector<std::string>& cfs,
                             const Options& options);
  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                const std::vector<Options>& options);
  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                const Options& options);
  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                     const std::vector<Options>& options);
  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                     const Options& options);
  void Reopen(const Options& options);
  void Close();
  void DestroyAndReopen(const Options& options);
  void Destroy(const Options& options, bool delete_cf_paths = false);
  Status ReadOnlyReopen(const Options& options);
  Status TryReopen(const Options& options);

  bool IsDirectIOSupported();
  bool IsMemoryMappedAccessSupported() const;

  Status Flush(int cf = 0);
  Status Flush(const std::vector<int>& cf_ids);
  Status Put(const Slice& k, const Slice& v, WriteOptions wo = WriteOptions());
  Status Put(int cf, const Slice& k, const Slice& v,
             WriteOptions wo = WriteOptions());
  Status Merge(const Slice& k, const Slice& v,
               WriteOptions wo = WriteOptions());
  Status Merge(int cf, const Slice& k, const Slice& v,
               WriteOptions wo = WriteOptions());
  Status Delete(const std::string& k);
  Status Delete(int cf, const std::string& k);
  Status SingleDelete(const std::string& k);
  Status SingleDelete(int cf, const std::string& k);
  bool SetPreserveDeletesSequenceNumber(SequenceNumber sn);
  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr);
  std::string Get(int cf, const std::string& k,
                  const Snapshot* snapshot = nullptr);
  Status Get(const std::string& k, PinnableSlice* v);
  std::vector<std::string> MultiGet(std::vector<int> cfs,
                                    const std::vector<std::string>& k,
                                    const Snapshot* snapshot,
                                    const bool batched);
  std::vector<std::string> MultiGet(const std::vector<std::string>& k,
                                    const Snapshot* snapshot = nullptr);

  uint64_t GetNumSnapshots();
  uint64_t GetTimeOldestSnapshots();
  uint64_t GetSequenceOldestSnapshots();

  // Return a string that contains all key,value pairs in order,
  // formatted like "(k1->v1)(k2->v2)".
  std::string Contents(int cf = 0);
  std::string AllEntriesFor(const Slice& user_key, int cf = 0);

#ifndef ROCKSDB_LITE
  int NumSortedRuns(int cf = 0);
  uint64_t TotalSize(int cf = 0);
  uint64_t SizeAtLevel(int level);
  size_t TotalLiveFiles(int cf = 0);
  size_t CountLiveFiles();
  int NumTableFilesAtLevel(int level, int cf = 0);
  double CompressionRatioAtLevel(int level, int cf = 0);
  int TotalTableFiles(int cf = 0, int levels = -1);
#endif  // ROCKSDB_LITE

  // Return spread of files per level
  std::string FilesPerLevel(int cf = 0);

  size_t CountFiles();

  uint64_t Size(const Slice& start, const Slice& limit, int cf = 0);

  void Compact(int cf, const Slice& start, const Slice& limit,
               uint32_t target_path_id);
  void Compact(int cf, const Slice& start, const Slice& limit);
  void Compact(const Slice& start, const Slice& limit);

  // Do n memtable compactions, each of which produces an sstable
  // covering the range [small,large].
  void MakeTables(int n, const std::string& small, const std::string& large,
                  int cf = 0);

  // Prevent pushing of new sstables into deeper levels by adding
  // tables that cover a specified range to all levels.
  void FillLevels(const std::string& smallest, const std::string& largest,
                  int cf);

  void MoveFilesToLevel(int level, int cf = 0);

#ifndef ROCKSDB_LITE
  void DumpFileCounts(const char* label);
#endif  // ROCKSDB_LITE

  std::string DumpSSTableList();

  static void GetSstFiles(Env* env, std::string path,
                          std::vector<std::string>* files);

  int GetSstFileCount(std::string path);

  // this will generate non-overlapping files since it keeps increasing key_idx
  void GenerateNewFile(Random* rnd, int* key_idx, bool nowait = false);
  void GenerateNewFile(int fd, Random* rnd, int* key_idx, bool nowait = false);

  static const int kNumKeysByGenerateNewRandomFile;
  static const int KNumKeysByGenerateNewFile = 100;

  void GenerateNewRandomFile(Random* rnd, bool nowait = false);

  std::string IterStatus(Iterator* iter);

  Options OptionsForLogIterTest();

  std::string DummyString(size_t len, char c = 'a');

  void VerifyIterLast(std::string expected_key, int cf = 0);
  // Used to test InplaceUpdate.
  // If the previous value is nullptr or delta is larger than the previous
  // value, sets newValue to delta.
  // If the previous value is not empty, updates it in place with a string of
  // 'b' characters whose length is the previous value's size minus one.
  static UpdateStatus updateInPlaceSmallerSize(char* prevValue,
                                               uint32_t* prevSize, Slice delta,
                                               std::string* newValue);
  static UpdateStatus updateInPlaceSmallerVarintSize(char* prevValue,
                                                     uint32_t* prevSize,
                                                     Slice delta,
                                                     std::string* newValue);
  static UpdateStatus updateInPlaceLargerSize(char* prevValue,
                                              uint32_t* prevSize, Slice delta,
                                              std::string* newValue);
  static UpdateStatus updateInPlaceNoAction(char* prevValue, uint32_t* prevSize,
                                            Slice delta, std::string* newValue);

  // Utility method to test InplaceUpdate
  void validateNumberOfEntries(int numValues, int cf = 0);

  void CopyFile(const std::string& source, const std::string& destination,
                uint64_t size = 0);

  std::unordered_map<std::string, uint64_t> GetAllSSTFiles(
      uint64_t* total_size = nullptr);

  std::vector<std::uint64_t> ListTableFiles(Env* env, const std::string& path);

  void VerifyDBFromMap(
      std::map<std::string, std::string> true_data,
      size_t* total_reads_res = nullptr, bool tailing_iter = false,
      std::map<std::string, Status> status = std::map<std::string, Status>());

  void VerifyDBInternal(
      std::vector<std::pair<std::string, std::string>> true_data);

#ifndef ROCKSDB_LITE
  uint64_t GetNumberOfSstFilesForColumnFamily(DB* db,
                                              std::string column_family_name);
#endif  // ROCKSDB_LITE

  uint64_t TestGetTickerCount(const Options& options, Tickers ticker_type) {
    return options.statistics->getTickerCount(ticker_type);
  }

  uint64_t TestGetAndResetTickerCount(const Options& options,
                                      Tickers ticker_type) {
    return options.statistics->getAndResetTickerCount(ticker_type);
  }
};
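// Illustrative sketch (not part of the original header): a typical test
// derives from DBTestBase and iterates over option configurations with
// ChangeOptions(), re-running its body against each one. The test names,
// path, and keys below are placeholders.
//
//   class ExampleDBTest : public DBTestBase {
//    public:
//     ExampleDBTest() : DBTestBase("/example_db_test") {}
//   };
//
//   TEST_F(ExampleDBTest, PutGetAcrossConfigs) {
//     do {
//       Options options = CurrentOptions();
//       DestroyAndReopen(options);
//       ASSERT_OK(Put(Key(1), "value"));
//       ASSERT_OK(Flush());
//       ASSERT_EQ("value", Get(Key(1)));
//     } while (ChangeOptions());
//   }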
}  // namespace ROCKSDB_NAMESPACE