mock_env.cc 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774
  1. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
  2. // This source code is licensed under both the GPLv2 (found in the
  3. // COPYING file in the root directory) and Apache 2.0 License
  4. // (found in the LICENSE.Apache file in the root directory).
  5. //
  6. // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
  7. // Use of this source code is governed by a BSD-style license that can be
  8. // found in the LICENSE file. See the AUTHORS file for names of contributors.
  9. #include "env/mock_env.h"
  10. #include <algorithm>
  11. #include <chrono>
  12. #include "port/sys_time.h"
  13. #include "util/cast_util.h"
  14. #include "util/murmurhash.h"
  15. #include "util/random.h"
  16. #include "util/rate_limiter.h"
  17. namespace ROCKSDB_NAMESPACE {
  18. class MemFile {
  19. public:
  20. explicit MemFile(Env* env, const std::string& fn, bool _is_lock_file = false)
  21. : env_(env),
  22. fn_(fn),
  23. refs_(0),
  24. is_lock_file_(_is_lock_file),
  25. locked_(false),
  26. size_(0),
  27. modified_time_(Now()),
  28. rnd_(static_cast<uint32_t>(
  29. MurmurHash(fn.data(), static_cast<int>(fn.size()), 0))),
  30. fsynced_bytes_(0) {}
  31. // No copying allowed.
  32. MemFile(const MemFile&) = delete;
  33. void operator=(const MemFile&) = delete;
  34. void Ref() {
  35. MutexLock lock(&mutex_);
  36. ++refs_;
  37. }
  38. bool is_lock_file() const { return is_lock_file_; }
  39. bool Lock() {
  40. assert(is_lock_file_);
  41. MutexLock lock(&mutex_);
  42. if (locked_) {
  43. return false;
  44. } else {
  45. locked_ = true;
  46. return true;
  47. }
  48. }
  49. void Unlock() {
  50. assert(is_lock_file_);
  51. MutexLock lock(&mutex_);
  52. locked_ = false;
  53. }
  54. void Unref() {
  55. bool do_delete = false;
  56. {
  57. MutexLock lock(&mutex_);
  58. --refs_;
  59. assert(refs_ >= 0);
  60. if (refs_ <= 0) {
  61. do_delete = true;
  62. }
  63. }
  64. if (do_delete) {
  65. delete this;
  66. }
  67. }
  68. uint64_t Size() const { return size_; }
  69. void Truncate(size_t size) {
  70. MutexLock lock(&mutex_);
  71. if (size < size_) {
  72. data_.resize(size);
  73. size_ = size;
  74. }
  75. }
  76. void CorruptBuffer() {
  77. if (fsynced_bytes_ >= size_) {
  78. return;
  79. }
  80. uint64_t buffered_bytes = size_ - fsynced_bytes_;
  81. uint64_t start =
  82. fsynced_bytes_ + rnd_.Uniform(static_cast<int>(buffered_bytes));
  83. uint64_t end = std::min(start + 512, size_.load());
  84. MutexLock lock(&mutex_);
  85. for (uint64_t pos = start; pos < end; ++pos) {
  86. data_[static_cast<size_t>(pos)] = static_cast<char>(rnd_.Uniform(256));
  87. }
  88. }
  89. Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const {
  90. MutexLock lock(&mutex_);
  91. const uint64_t available = Size() - std::min(Size(), offset);
  92. size_t offset_ = static_cast<size_t>(offset);
  93. if (n > available) {
  94. n = static_cast<size_t>(available);
  95. }
  96. if (n == 0) {
  97. *result = Slice();
  98. return Status::OK();
  99. }
  100. if (scratch) {
  101. memcpy(scratch, &(data_[offset_]), n);
  102. *result = Slice(scratch, n);
  103. } else {
  104. *result = Slice(&(data_[offset_]), n);
  105. }
  106. return Status::OK();
  107. }
  108. Status Write(uint64_t offset, const Slice& data) {
  109. MutexLock lock(&mutex_);
  110. size_t offset_ = static_cast<size_t>(offset);
  111. if (offset + data.size() > data_.size()) {
  112. data_.resize(offset_ + data.size());
  113. }
  114. data_.replace(offset_, data.size(), data.data(), data.size());
  115. size_ = data_.size();
  116. modified_time_ = Now();
  117. return Status::OK();
  118. }
  119. Status Append(const Slice& data) {
  120. MutexLock lock(&mutex_);
  121. data_.append(data.data(), data.size());
  122. size_ = data_.size();
  123. modified_time_ = Now();
  124. return Status::OK();
  125. }
  126. Status Fsync() {
  127. fsynced_bytes_ = size_.load();
  128. return Status::OK();
  129. }
  130. uint64_t ModifiedTime() const { return modified_time_; }
  131. private:
  132. uint64_t Now() {
  133. int64_t unix_time = 0;
  134. auto s = env_->GetCurrentTime(&unix_time);
  135. assert(s.ok());
  136. return static_cast<uint64_t>(unix_time);
  137. }
  138. // Private since only Unref() should be used to delete it.
  139. ~MemFile() { assert(refs_ == 0); }
  140. Env* env_;
  141. const std::string fn_;
  142. mutable port::Mutex mutex_;
  143. int refs_;
  144. bool is_lock_file_;
  145. bool locked_;
  146. // Data written into this file, all bytes before fsynced_bytes are
  147. // persistent.
  148. std::string data_;
  149. std::atomic<uint64_t> size_;
  150. std::atomic<uint64_t> modified_time_;
  151. Random rnd_;
  152. std::atomic<uint64_t> fsynced_bytes_;
  153. };
  154. namespace {
  155. class MockSequentialFile : public SequentialFile {
  156. public:
  157. explicit MockSequentialFile(MemFile* file) : file_(file), pos_(0) {
  158. file_->Ref();
  159. }
  160. ~MockSequentialFile() override { file_->Unref(); }
  161. Status Read(size_t n, Slice* result, char* scratch) override {
  162. Status s = file_->Read(pos_, n, result, scratch);
  163. if (s.ok()) {
  164. pos_ += result->size();
  165. }
  166. return s;
  167. }
  168. Status Skip(uint64_t n) override {
  169. if (pos_ > file_->Size()) {
  170. return Status::IOError("pos_ > file_->Size()");
  171. }
  172. const uint64_t available = file_->Size() - pos_;
  173. if (n > available) {
  174. n = available;
  175. }
  176. pos_ += static_cast<size_t>(n);
  177. return Status::OK();
  178. }
  179. private:
  180. MemFile* file_;
  181. size_t pos_;
  182. };
  183. class MockRandomAccessFile : public RandomAccessFile {
  184. public:
  185. explicit MockRandomAccessFile(MemFile* file) : file_(file) { file_->Ref(); }
  186. ~MockRandomAccessFile() override { file_->Unref(); }
  187. Status Read(uint64_t offset, size_t n, Slice* result,
  188. char* scratch) const override {
  189. return file_->Read(offset, n, result, scratch);
  190. }
  191. private:
  192. MemFile* file_;
  193. };
  194. class MockRandomRWFile : public RandomRWFile {
  195. public:
  196. explicit MockRandomRWFile(MemFile* file) : file_(file) { file_->Ref(); }
  197. ~MockRandomRWFile() override { file_->Unref(); }
  198. Status Write(uint64_t offset, const Slice& data) override {
  199. return file_->Write(offset, data);
  200. }
  201. Status Read(uint64_t offset, size_t n, Slice* result,
  202. char* scratch) const override {
  203. return file_->Read(offset, n, result, scratch);
  204. }
  205. Status Close() override { return file_->Fsync(); }
  206. Status Flush() override { return Status::OK(); }
  207. Status Sync() override { return file_->Fsync(); }
  208. private:
  209. MemFile* file_;
  210. };
  211. class MockWritableFile : public WritableFile {
  212. public:
  213. MockWritableFile(MemFile* file, RateLimiter* rate_limiter)
  214. : file_(file), rate_limiter_(rate_limiter) {
  215. file_->Ref();
  216. }
  217. ~MockWritableFile() override { file_->Unref(); }
  218. Status Append(const Slice& data) override {
  219. size_t bytes_written = 0;
  220. while (bytes_written < data.size()) {
  221. auto bytes = RequestToken(data.size() - bytes_written);
  222. Status s = file_->Append(Slice(data.data() + bytes_written, bytes));
  223. if (!s.ok()) {
  224. return s;
  225. }
  226. bytes_written += bytes;
  227. }
  228. return Status::OK();
  229. }
  230. Status Truncate(uint64_t size) override {
  231. file_->Truncate(static_cast<size_t>(size));
  232. return Status::OK();
  233. }
  234. Status Close() override { return file_->Fsync(); }
  235. Status Flush() override { return Status::OK(); }
  236. Status Sync() override { return file_->Fsync(); }
  237. uint64_t GetFileSize() override { return file_->Size(); }
  238. private:
  239. inline size_t RequestToken(size_t bytes) {
  240. if (rate_limiter_ && io_priority_ < Env::IO_TOTAL) {
  241. bytes = std::min(
  242. bytes, static_cast<size_t>(rate_limiter_->GetSingleBurstBytes()));
  243. rate_limiter_->Request(bytes, io_priority_);
  244. }
  245. return bytes;
  246. }
  247. MemFile* file_;
  248. RateLimiter* rate_limiter_;
  249. };
// Directory handle for MockEnv; fsync of an in-memory directory is a no-op.
class MockEnvDirectory : public Directory {
 public:
  Status Fsync() override { return Status::OK(); }
};
  254. class MockEnvFileLock : public FileLock {
  255. public:
  256. explicit MockEnvFileLock(const std::string& fname) : fname_(fname) {}
  257. std::string FileName() const { return fname_; }
  258. private:
  259. const std::string fname_;
  260. };
// In-memory Logger used by MockEnv: formats "<timestamp> <message>\n" lines
// and appends them to a WritableFile (a MockWritableFile in practice).
// Tracks total bytes logged and flush bookkeeping, but never performs real
// I/O.
class TestMemLogger : public Logger {
 private:
  std::unique_ptr<WritableFile> file_;
  std::atomic_size_t log_size_;  // total bytes appended so far
  static const uint64_t flush_every_seconds_ = 5;
  std::atomic_uint_fast64_t last_flush_micros_;
  Env* env_;
  std::atomic<bool> flush_pending_;  // set after an append, cleared on flush

 public:
  TestMemLogger(std::unique_ptr<WritableFile> f, Env* env,
                const InfoLogLevel log_level = InfoLogLevel::ERROR_LEVEL)
      : Logger(log_level),
        file_(std::move(f)),
        log_size_(0),
        last_flush_micros_(0),
        env_(env),
        flush_pending_(false) {}
  ~TestMemLogger() override {}

  // Clears the pending flag and records the flush time via the mock clock.
  // No data movement happens: the underlying file's Flush() is a no-op.
  void Flush() override {
    if (flush_pending_) {
      flush_pending_ = false;
    }
    last_flush_micros_ = env_->NowMicros();
  }

  using Logger::Logv;
  // Formats one log record and appends it to file_.
  void Logv(const char* format, va_list ap) override {
    // We try twice: the first time with a fixed-size stack allocated buffer,
    // and the second time with a much larger dynamically allocated buffer.
    char buffer[500];
    for (int iter = 0; iter < 2; iter++) {
      char* base;
      int bufsize;
      if (iter == 0) {
        bufsize = sizeof(buffer);
        base = buffer;
      } else {
        bufsize = 30000;
        base = new char[bufsize];
      }
      char* p = base;
      char* limit = base + bufsize;
      // Timestamp prefix uses the real wall clock (gettimeofday), not the
      // mock clock used for flush bookkeeping below.
      struct timeval now_tv;
      gettimeofday(&now_tv, nullptr);
      const time_t seconds = now_tv.tv_sec;
      struct tm t;
      memset(&t, 0, sizeof(t));
      struct tm* ret __attribute__((__unused__));
      ret = localtime_r(&seconds, &t);
      assert(ret);
      p += snprintf(p, limit - p, "%04d/%02d/%02d-%02d:%02d:%02d.%06d ",
                    t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour,
                    t.tm_min, t.tm_sec, static_cast<int>(now_tv.tv_usec));
      // Print the message
      if (p < limit) {
        va_list backup_ap;
        // vsnprintf may be called twice (once per iteration), so work on a
        // copy of ap each time.
        va_copy(backup_ap, ap);
        p += vsnprintf(p, limit - p, format, backup_ap);
        va_end(backup_ap);
      }
      // Truncate to available space if necessary
      if (p >= limit) {
        if (iter == 0) {
          continue;  // Try again with larger buffer
        } else {
          p = limit - 1;
        }
      }
      // Add newline if necessary
      if (p == base || p[-1] != '\n') {
        *p++ = '\n';
      }
      assert(p <= limit);
      const size_t write_size = p - base;
      // NOTE(review): the Append() status is ignored here — presumably
      // acceptable for a test logger; confirm if errors should propagate.
      file_->Append(Slice(base, write_size));
      flush_pending_ = true;
      log_size_ += write_size;
      // Consider the data "flushed" automatically once flush_every_seconds_
      // have elapsed since the last flush.
      uint64_t now_micros =
          static_cast<uint64_t>(now_tv.tv_sec) * 1000000 + now_tv.tv_usec;
      if (now_micros - last_flush_micros_ >= flush_every_seconds_ * 1000000) {
        flush_pending_ = false;
        last_flush_micros_ = now_micros;
      }
      if (base != buffer) {
        delete[] base;
      }
      break;
    }
  }

  size_t GetLogFileSize() const override { return log_size_; }
};
  351. } // Anonymous namespace
// MockEnv wraps base_env (everything not overridden is delegated to it) and
// starts with a zero fake-clock offset.
MockEnv::MockEnv(Env* base_env) : EnvWrapper(base_env), fake_sleep_micros_(0) {}
  353. MockEnv::~MockEnv() {
  354. for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i) {
  355. i->second->Unref();
  356. }
  357. }
  358. // Partial implementation of the Env interface.
  359. Status MockEnv::NewSequentialFile(const std::string& fname,
  360. std::unique_ptr<SequentialFile>* result,
  361. const EnvOptions& /*soptions*/) {
  362. auto fn = NormalizePath(fname);
  363. MutexLock lock(&mutex_);
  364. if (file_map_.find(fn) == file_map_.end()) {
  365. *result = nullptr;
  366. return Status::IOError(fn, "File not found");
  367. }
  368. auto* f = file_map_[fn];
  369. if (f->is_lock_file()) {
  370. return Status::InvalidArgument(fn, "Cannot open a lock file.");
  371. }
  372. result->reset(new MockSequentialFile(f));
  373. return Status::OK();
  374. }
  375. Status MockEnv::NewRandomAccessFile(const std::string& fname,
  376. std::unique_ptr<RandomAccessFile>* result,
  377. const EnvOptions& /*soptions*/) {
  378. auto fn = NormalizePath(fname);
  379. MutexLock lock(&mutex_);
  380. if (file_map_.find(fn) == file_map_.end()) {
  381. *result = nullptr;
  382. return Status::IOError(fn, "File not found");
  383. }
  384. auto* f = file_map_[fn];
  385. if (f->is_lock_file()) {
  386. return Status::InvalidArgument(fn, "Cannot open a lock file.");
  387. }
  388. result->reset(new MockRandomAccessFile(f));
  389. return Status::OK();
  390. }
  391. Status MockEnv::NewRandomRWFile(const std::string& fname,
  392. std::unique_ptr<RandomRWFile>* result,
  393. const EnvOptions& /*soptions*/) {
  394. auto fn = NormalizePath(fname);
  395. MutexLock lock(&mutex_);
  396. if (file_map_.find(fn) == file_map_.end()) {
  397. *result = nullptr;
  398. return Status::IOError(fn, "File not found");
  399. }
  400. auto* f = file_map_[fn];
  401. if (f->is_lock_file()) {
  402. return Status::InvalidArgument(fn, "Cannot open a lock file.");
  403. }
  404. result->reset(new MockRandomRWFile(f));
  405. return Status::OK();
  406. }
  407. Status MockEnv::ReuseWritableFile(const std::string& fname,
  408. const std::string& old_fname,
  409. std::unique_ptr<WritableFile>* result,
  410. const EnvOptions& options) {
  411. auto s = RenameFile(old_fname, fname);
  412. if (!s.ok()) {
  413. return s;
  414. }
  415. result->reset();
  416. return NewWritableFile(fname, result, options);
  417. }
  418. Status MockEnv::NewWritableFile(const std::string& fname,
  419. std::unique_ptr<WritableFile>* result,
  420. const EnvOptions& env_options) {
  421. auto fn = NormalizePath(fname);
  422. MutexLock lock(&mutex_);
  423. if (file_map_.find(fn) != file_map_.end()) {
  424. DeleteFileInternal(fn);
  425. }
  426. MemFile* file = new MemFile(this, fn, false);
  427. file->Ref();
  428. file_map_[fn] = file;
  429. result->reset(new MockWritableFile(file, env_options.rate_limiter));
  430. return Status::OK();
  431. }
// Directories need no per-instance state in this mock; always hand back a
// no-op Directory object.
Status MockEnv::NewDirectory(const std::string& /*name*/,
                             std::unique_ptr<Directory>* result) {
  result->reset(new MockEnvDirectory());
  return Status::OK();
}
  437. Status MockEnv::FileExists(const std::string& fname) {
  438. auto fn = NormalizePath(fname);
  439. MutexLock lock(&mutex_);
  440. if (file_map_.find(fn) != file_map_.end()) {
  441. // File exists
  442. return Status::OK();
  443. }
  444. // Now also check if fn exists as a dir
  445. for (const auto& iter : file_map_) {
  446. const std::string& filename = iter.first;
  447. if (filename.size() >= fn.size() + 1 && filename[fn.size()] == '/' &&
  448. Slice(filename).starts_with(Slice(fn))) {
  449. return Status::OK();
  450. }
  451. }
  452. return Status::NotFound();
  453. }
// Lists the immediate children of dir by scanning all map entries whose
// path starts with "dir/". Returns NotFound when no entry matches the dir
// at all.
Status MockEnv::GetChildren(const std::string& dir,
                            std::vector<std::string>* result) {
  auto d = NormalizePath(dir);
  bool found_dir = false;
  {
    MutexLock lock(&mutex_);
    result->clear();
    for (const auto& iter : file_map_) {
      const std::string& filename = iter.first;
      if (filename == d) {
        found_dir = true;
      } else if (filename.size() >= d.size() + 1 && filename[d.size()] == '/' &&
                 Slice(filename).starts_with(Slice(d))) {
        found_dir = true;
        // Keep only the first path component after "d/": entries under a
        // common subdirectory all map to that subdirectory's name.
        size_t next_slash = filename.find('/', d.size() + 1);
        if (next_slash != std::string::npos) {
          result->push_back(
              filename.substr(d.size() + 1, next_slash - d.size() - 1));
        } else {
          result->push_back(filename.substr(d.size() + 1));
        }
      }
    }
  }
  // file_map_ is ordered by path, so duplicate child names (from multiple
  // entries in the same subdirectory) come out adjacent; std::unique alone
  // suffices without sorting.
  result->erase(std::unique(result->begin(), result->end()), result->end());
  return found_dir ? Status::OK() : Status::NotFound();
}
  481. void MockEnv::DeleteFileInternal(const std::string& fname) {
  482. assert(fname == NormalizePath(fname));
  483. const auto& pair = file_map_.find(fname);
  484. if (pair != file_map_.end()) {
  485. pair->second->Unref();
  486. file_map_.erase(fname);
  487. }
  488. }
  489. Status MockEnv::DeleteFile(const std::string& fname) {
  490. auto fn = NormalizePath(fname);
  491. MutexLock lock(&mutex_);
  492. if (file_map_.find(fn) == file_map_.end()) {
  493. return Status::IOError(fn, "File not found");
  494. }
  495. DeleteFileInternal(fn);
  496. return Status::OK();
  497. }
  498. Status MockEnv::Truncate(const std::string& fname, size_t size) {
  499. auto fn = NormalizePath(fname);
  500. MutexLock lock(&mutex_);
  501. auto iter = file_map_.find(fn);
  502. if (iter == file_map_.end()) {
  503. return Status::IOError(fn, "File not found");
  504. }
  505. iter->second->Truncate(size);
  506. return Status::OK();
  507. }
  508. Status MockEnv::CreateDir(const std::string& dirname) {
  509. auto dn = NormalizePath(dirname);
  510. if (file_map_.find(dn) == file_map_.end()) {
  511. MemFile* file = new MemFile(this, dn, false);
  512. file->Ref();
  513. file_map_[dn] = file;
  514. } else {
  515. return Status::IOError();
  516. }
  517. return Status::OK();
  518. }
// Like CreateDir() but an already-existing directory is not an error: the
// CreateDir() status is deliberately discarded.
Status MockEnv::CreateDirIfMissing(const std::string& dirname) {
  CreateDir(dirname);
  return Status::OK();
}
// Directories are stored as plain entries in file_map_, so removing one is
// the same operation as removing a file.
Status MockEnv::DeleteDir(const std::string& dirname) {
  return DeleteFile(dirname);
}
  526. Status MockEnv::GetFileSize(const std::string& fname, uint64_t* file_size) {
  527. auto fn = NormalizePath(fname);
  528. MutexLock lock(&mutex_);
  529. auto iter = file_map_.find(fn);
  530. if (iter == file_map_.end()) {
  531. return Status::IOError(fn, "File not found");
  532. }
  533. *file_size = iter->second->Size();
  534. return Status::OK();
  535. }
  536. Status MockEnv::GetFileModificationTime(const std::string& fname,
  537. uint64_t* time) {
  538. auto fn = NormalizePath(fname);
  539. MutexLock lock(&mutex_);
  540. auto iter = file_map_.find(fn);
  541. if (iter == file_map_.end()) {
  542. return Status::IOError(fn, "File not found");
  543. }
  544. *time = iter->second->ModifiedTime();
  545. return Status::OK();
  546. }
  547. Status MockEnv::RenameFile(const std::string& src, const std::string& dest) {
  548. auto s = NormalizePath(src);
  549. auto t = NormalizePath(dest);
  550. MutexLock lock(&mutex_);
  551. if (file_map_.find(s) == file_map_.end()) {
  552. return Status::IOError(s, "File not found");
  553. }
  554. DeleteFileInternal(t);
  555. file_map_[t] = file_map_[s];
  556. file_map_.erase(s);
  557. return Status::OK();
  558. }
  559. Status MockEnv::LinkFile(const std::string& src, const std::string& dest) {
  560. auto s = NormalizePath(src);
  561. auto t = NormalizePath(dest);
  562. MutexLock lock(&mutex_);
  563. if (file_map_.find(s) == file_map_.end()) {
  564. return Status::IOError(s, "File not found");
  565. }
  566. DeleteFileInternal(t);
  567. file_map_[t] = file_map_[s];
  568. file_map_[t]->Ref(); // Otherwise it might get deleted when noone uses s
  569. return Status::OK();
  570. }
  571. Status MockEnv::NewLogger(const std::string& fname,
  572. std::shared_ptr<Logger>* result) {
  573. auto fn = NormalizePath(fname);
  574. MutexLock lock(&mutex_);
  575. auto iter = file_map_.find(fn);
  576. MemFile* file = nullptr;
  577. if (iter == file_map_.end()) {
  578. file = new MemFile(this, fn, false);
  579. file->Ref();
  580. file_map_[fn] = file;
  581. } else {
  582. file = iter->second;
  583. }
  584. std::unique_ptr<WritableFile> f(new MockWritableFile(file, nullptr));
  585. result->reset(new TestMemLogger(std::move(f), this));
  586. return Status::OK();
  587. }
  588. Status MockEnv::LockFile(const std::string& fname, FileLock** flock) {
  589. auto fn = NormalizePath(fname);
  590. {
  591. MutexLock lock(&mutex_);
  592. if (file_map_.find(fn) != file_map_.end()) {
  593. if (!file_map_[fn]->is_lock_file()) {
  594. return Status::InvalidArgument(fname, "Not a lock file.");
  595. }
  596. if (!file_map_[fn]->Lock()) {
  597. return Status::IOError(fn, "Lock is already held.");
  598. }
  599. } else {
  600. auto* file = new MemFile(this, fn, true);
  601. file->Ref();
  602. file->Lock();
  603. file_map_[fn] = file;
  604. }
  605. }
  606. *flock = new MockEnvFileLock(fn);
  607. return Status::OK();
  608. }
// Releases the lock named by flock and deletes the lock token. Unlocking a
// name that no longer exists is a silent no-op.
// NOTE(review): on the InvalidArgument path flock is returned to the caller
// un-deleted — looks like a small leak; confirm whether callers free it.
Status MockEnv::UnlockFile(FileLock* flock) {
  std::string fn =
      static_cast_with_check<MockEnvFileLock, FileLock>(flock)->FileName();
  {
    MutexLock lock(&mutex_);
    if (file_map_.find(fn) != file_map_.end()) {
      if (!file_map_[fn]->is_lock_file()) {
        return Status::InvalidArgument(fn, "Not a lock file.");
      }
      file_map_[fn]->Unlock();
    }
  }
  delete flock;
  return Status::OK();
}
// Tests always get the same fixed in-memory path.
Status MockEnv::GetTestDirectory(std::string* path) {
  *path = "/test";
  return Status::OK();
}
  628. Status MockEnv::GetCurrentTime(int64_t* unix_time) {
  629. auto s = EnvWrapper::GetCurrentTime(unix_time);
  630. if (s.ok()) {
  631. *unix_time += fake_sleep_micros_.load() / (1000 * 1000);
  632. }
  633. return s;
  634. }
  635. uint64_t MockEnv::NowMicros() {
  636. return EnvWrapper::NowMicros() + fake_sleep_micros_.load();
  637. }
  638. uint64_t MockEnv::NowNanos() {
  639. return EnvWrapper::NowNanos() + fake_sleep_micros_.load() * 1000;
  640. }
  641. Status MockEnv::CorruptBuffer(const std::string& fname) {
  642. auto fn = NormalizePath(fname);
  643. MutexLock lock(&mutex_);
  644. auto iter = file_map_.find(fn);
  645. if (iter == file_map_.end()) {
  646. return Status::IOError(fn, "File not found");
  647. }
  648. iter->second->CorruptBuffer();
  649. return Status::OK();
  650. }
  651. std::string MockEnv::NormalizePath(const std::string path) {
  652. std::string dst;
  653. for (auto c : path) {
  654. if (!dst.empty() && c == '/' && dst.back() == '/') {
  655. continue;
  656. }
  657. dst.push_back(c);
  658. }
  659. return dst;
  660. }
  661. void MockEnv::FakeSleepForMicroseconds(int64_t micros) {
  662. fake_sleep_micros_.fetch_add(micros);
  663. }
#ifndef ROCKSDB_LITE
// This is to maintain the behavior before switching from InMemoryEnv to
// MockEnv. In LITE builds the in-memory env is unavailable.
Env* NewMemEnv(Env* base_env) { return new MockEnv(base_env); }
#else  // ROCKSDB_LITE
Env* NewMemEnv(Env* /*base_env*/) { return nullptr; }
#endif  // !ROCKSDB_LITE
  670. } // namespace ROCKSDB_NAMESPACE