// inlineskiplist_test.cc
  1. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
  2. // This source code is licensed under both the GPLv2 (found in the
  3. // COPYING file in the root directory) and Apache 2.0 License
  4. // (found in the LICENSE.Apache file in the root directory).
  5. //
  6. // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
  7. // Use of this source code is governed by a BSD-style license that can be
  8. // found in the LICENSE file. See the AUTHORS file for names of contributors.
  9. #include "memtable/inlineskiplist.h"
  10. #include <set>
  11. #include <unordered_set>
  12. #include "memory/concurrent_arena.h"
  13. #include "rocksdb/env.h"
  14. #include "test_util/testharness.h"
  15. #include "util/hash.h"
  16. #include "util/random.h"
  17. namespace ROCKSDB_NAMESPACE {
  18. // Our test skip list stores 8-byte unsigned integers
  19. typedef uint64_t Key;
  20. static const char* Encode(const uint64_t* key) {
  21. return reinterpret_cast<const char*>(key);
  22. }
  23. static Key Decode(const char* key) {
  24. Key rv;
  25. memcpy(&rv, key, sizeof(Key));
  26. return rv;
  27. }
  28. struct TestComparator {
  29. typedef Key DecodedType;
  30. static DecodedType decode_key(const char* b) {
  31. return Decode(b);
  32. }
  33. int operator()(const char* a, const char* b) const {
  34. if (Decode(a) < Decode(b)) {
  35. return -1;
  36. } else if (Decode(a) > Decode(b)) {
  37. return +1;
  38. } else {
  39. return 0;
  40. }
  41. }
  42. int operator()(const char* a, const DecodedType b) const {
  43. if (Decode(a) < b) {
  44. return -1;
  45. } else if (Decode(a) > b) {
  46. return +1;
  47. } else {
  48. return 0;
  49. }
  50. }
  51. };
  52. typedef InlineSkipList<TestComparator> TestInlineSkipList;
  53. class InlineSkipTest : public testing::Test {
  54. public:
  55. void Insert(TestInlineSkipList* list, Key key) {
  56. char* buf = list->AllocateKey(sizeof(Key));
  57. memcpy(buf, &key, sizeof(Key));
  58. list->Insert(buf);
  59. keys_.insert(key);
  60. }
  61. bool InsertWithHint(TestInlineSkipList* list, Key key, void** hint) {
  62. char* buf = list->AllocateKey(sizeof(Key));
  63. memcpy(buf, &key, sizeof(Key));
  64. bool res = list->InsertWithHint(buf, hint);
  65. keys_.insert(key);
  66. return res;
  67. }
  68. void Validate(TestInlineSkipList* list) {
  69. // Check keys exist.
  70. for (Key key : keys_) {
  71. ASSERT_TRUE(list->Contains(Encode(&key)));
  72. }
  73. // Iterate over the list, make sure keys appears in order and no extra
  74. // keys exist.
  75. TestInlineSkipList::Iterator iter(list);
  76. ASSERT_FALSE(iter.Valid());
  77. Key zero = 0;
  78. iter.Seek(Encode(&zero));
  79. for (Key key : keys_) {
  80. ASSERT_TRUE(iter.Valid());
  81. ASSERT_EQ(key, Decode(iter.key()));
  82. iter.Next();
  83. }
  84. ASSERT_FALSE(iter.Valid());
  85. // Validate the list is well-formed.
  86. list->TEST_Validate();
  87. }
  88. private:
  89. std::set<Key> keys_;
  90. };
  91. TEST_F(InlineSkipTest, Empty) {
  92. Arena arena;
  93. TestComparator cmp;
  94. InlineSkipList<TestComparator> list(cmp, &arena);
  95. Key key = 10;
  96. ASSERT_TRUE(!list.Contains(Encode(&key)));
  97. InlineSkipList<TestComparator>::Iterator iter(&list);
  98. ASSERT_TRUE(!iter.Valid());
  99. iter.SeekToFirst();
  100. ASSERT_TRUE(!iter.Valid());
  101. key = 100;
  102. iter.Seek(Encode(&key));
  103. ASSERT_TRUE(!iter.Valid());
  104. iter.SeekForPrev(Encode(&key));
  105. ASSERT_TRUE(!iter.Valid());
  106. iter.SeekToLast();
  107. ASSERT_TRUE(!iter.Valid());
  108. }
// Insert up to N distinct random keys drawn from [0, R) and cross-check the
// skip list against a std::set model: membership via Contains(), then
// forward and backward iteration from every possible seek target.
TEST_F(InlineSkipTest, InsertAndLookup) {
  const int N = 2000;
  const int R = 5000;
  Random rnd(1000);
  std::set<Key> keys;  // Shadow model of the inserted keys.
  ConcurrentArena arena;
  TestComparator cmp;
  InlineSkipList<TestComparator> list(cmp, &arena);
  for (int i = 0; i < N; i++) {
    Key key = rnd.Next() % R;
    // Only insert keys that are not already present; duplicates stay out of
    // both the model and the list.
    if (keys.insert(key).second) {
      char* buf = list.AllocateKey(sizeof(Key));
      memcpy(buf, &key, sizeof(Key));
      list.Insert(buf);
    }
  }

  // Contains() must agree with the model for every key in [0, R).
  for (Key i = 0; i < R; i++) {
    if (list.Contains(Encode(&i))) {
      ASSERT_EQ(keys.count(i), 1U);
    } else {
      ASSERT_EQ(keys.count(i), 0U);
    }
  }

  // Simple iterator tests
  {
    InlineSkipList<TestComparator>::Iterator iter(&list);
    ASSERT_TRUE(!iter.Valid());

    // Seeking to 0 (below any key) must land on the smallest key.
    uint64_t zero = 0;
    iter.Seek(Encode(&zero));
    ASSERT_TRUE(iter.Valid());
    ASSERT_EQ(*(keys.begin()), Decode(iter.key()));

    // SeekForPrev at the maximum possible key lands on the largest key.
    uint64_t max_key = R - 1;
    iter.SeekForPrev(Encode(&max_key));
    ASSERT_TRUE(iter.Valid());
    ASSERT_EQ(*(keys.rbegin()), Decode(iter.key()));

    iter.SeekToFirst();
    ASSERT_TRUE(iter.Valid());
    ASSERT_EQ(*(keys.begin()), Decode(iter.key()));

    iter.SeekToLast();
    ASSERT_TRUE(iter.Valid());
    ASSERT_EQ(*(keys.rbegin()), Decode(iter.key()));
  }

  // Forward iteration test: Seek(i) must behave like lower_bound(i) on the
  // model; walk up to 3 steps forward from every seek target.
  for (Key i = 0; i < R; i++) {
    InlineSkipList<TestComparator>::Iterator iter(&list);
    iter.Seek(Encode(&i));

    // Compare against model iterator
    std::set<Key>::iterator model_iter = keys.lower_bound(i);
    for (int j = 0; j < 3; j++) {
      if (model_iter == keys.end()) {
        ASSERT_TRUE(!iter.Valid());
        break;
      } else {
        ASSERT_TRUE(iter.Valid());
        ASSERT_EQ(*model_iter, Decode(iter.key()));
        ++model_iter;
        iter.Next();
      }
    }
  }

  // Backward iteration test: SeekForPrev(i) must behave like stepping back
  // from upper_bound(i); walk up to 3 steps backward from every target.
  for (Key i = 0; i < R; i++) {
    InlineSkipList<TestComparator>::Iterator iter(&list);
    iter.SeekForPrev(Encode(&i));

    // Compare against model iterator
    std::set<Key>::iterator model_iter = keys.upper_bound(i);
    for (int j = 0; j < 3; j++) {
      if (model_iter == keys.begin()) {
        ASSERT_TRUE(!iter.Valid());
        break;
      } else {
        ASSERT_TRUE(iter.Valid());
        ASSERT_EQ(*--model_iter, Decode(iter.key()));
        iter.Prev();
      }
    }
  }
}
  187. TEST_F(InlineSkipTest, InsertWithHint_Sequential) {
  188. const int N = 100000;
  189. Arena arena;
  190. TestComparator cmp;
  191. TestInlineSkipList list(cmp, &arena);
  192. void* hint = nullptr;
  193. for (int i = 0; i < N; i++) {
  194. Key key = i;
  195. InsertWithHint(&list, key, &hint);
  196. }
  197. Validate(&list);
  198. }
  199. TEST_F(InlineSkipTest, InsertWithHint_MultipleHints) {
  200. const int N = 100000;
  201. const int S = 100;
  202. Random rnd(534);
  203. Arena arena;
  204. TestComparator cmp;
  205. TestInlineSkipList list(cmp, &arena);
  206. void* hints[S];
  207. Key last_key[S];
  208. for (int i = 0; i < S; i++) {
  209. hints[i] = nullptr;
  210. last_key[i] = 0;
  211. }
  212. for (int i = 0; i < N; i++) {
  213. Key s = rnd.Uniform(S);
  214. Key key = (s << 32) + (++last_key[s]);
  215. InsertWithHint(&list, key, &hints[s]);
  216. }
  217. Validate(&list);
  218. }
  219. TEST_F(InlineSkipTest, InsertWithHint_MultipleHintsRandom) {
  220. const int N = 100000;
  221. const int S = 100;
  222. Random rnd(534);
  223. Arena arena;
  224. TestComparator cmp;
  225. TestInlineSkipList list(cmp, &arena);
  226. void* hints[S];
  227. for (int i = 0; i < S; i++) {
  228. hints[i] = nullptr;
  229. }
  230. for (int i = 0; i < N; i++) {
  231. Key s = rnd.Uniform(S);
  232. Key key = (s << 32) + rnd.Next();
  233. InsertWithHint(&list, key, &hints[s]);
  234. }
  235. Validate(&list);
  236. }
  237. TEST_F(InlineSkipTest, InsertWithHint_CompatibleWithInsertWithoutHint) {
  238. const int N = 100000;
  239. const int S1 = 100;
  240. const int S2 = 100;
  241. Random rnd(534);
  242. Arena arena;
  243. TestComparator cmp;
  244. TestInlineSkipList list(cmp, &arena);
  245. std::unordered_set<Key> used;
  246. Key with_hint[S1];
  247. Key without_hint[S2];
  248. void* hints[S1];
  249. for (int i = 0; i < S1; i++) {
  250. hints[i] = nullptr;
  251. while (true) {
  252. Key s = rnd.Next();
  253. if (used.insert(s).second) {
  254. with_hint[i] = s;
  255. break;
  256. }
  257. }
  258. }
  259. for (int i = 0; i < S2; i++) {
  260. while (true) {
  261. Key s = rnd.Next();
  262. if (used.insert(s).second) {
  263. without_hint[i] = s;
  264. break;
  265. }
  266. }
  267. }
  268. for (int i = 0; i < N; i++) {
  269. Key s = rnd.Uniform(S1 + S2);
  270. if (s < S1) {
  271. Key key = (with_hint[s] << 32) + rnd.Next();
  272. InsertWithHint(&list, key, &hints[s]);
  273. } else {
  274. Key key = (without_hint[s - S1] << 32) + rnd.Next();
  275. Insert(&list, key);
  276. }
  277. }
  278. Validate(&list);
  279. }
  280. #ifndef ROCKSDB_VALGRIND_RUN
  281. // We want to make sure that with a single writer and multiple
  282. // concurrent readers (with no synchronization other than when a
  283. // reader's iterator is created), the reader always observes all the
  284. // data that was present in the skip list when the iterator was
// constructed. Because insertions are happening concurrently, we may
  286. // also observe new values that were inserted since the iterator was
  287. // constructed, but we should never miss any values that were present
  288. // at iterator construction time.
  289. //
  290. // We generate multi-part keys:
  291. // <key,gen,hash>
  292. // where:
  293. // key is in range [0..K-1]
  294. // gen is a generation number for key
  295. // hash is hash(key,gen)
  296. //
  297. // The insertion code picks a random key, sets gen to be 1 + the last
  298. // generation number inserted for that key, and sets hash to Hash(key,gen).
  299. //
  300. // At the beginning of a read, we snapshot the last inserted
  301. // generation number for each key. We then iterate, including random
  302. // calls to Next() and Seek(). For every key we encounter, we
  303. // check that it is either expected given the initial snapshot or has
  304. // been concurrently added since the iterator started.
// Harness shared by the concurrent read/write tests.  Keys pack three
// fields into 64 bits: <key (high 24 bits), gen (32 bits), hash (8 bits)>;
// see the comment block above for the protocol readers verify.
class ConcurrentTest {
 public:
  static const uint32_t K = 8;

 private:
  // Field extractors for the packed 64-bit test key.
  static uint64_t key(Key key) { return (key >> 40); }
  static uint64_t gen(Key key) { return (key >> 8) & 0xffffffffu; }
  static uint64_t hash(Key key) { return key & 0xff; }

  static uint64_t HashNumbers(uint64_t k, uint64_t g) {
    uint64_t data[2] = {k, g};
    return Hash(reinterpret_cast<char*>(data), sizeof(data), 0);
  }

  // Pack (k, g) plus an 8-bit integrity hash into a single Key.
  static Key MakeKey(uint64_t k, uint64_t g) {
    assert(sizeof(Key) == sizeof(uint64_t));
    assert(k <= K);  // We sometimes pass K to seek to the end of the skiplist
    assert(g <= 0xffffffffu);
    return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff));
  }

  // True iff the embedded hash matches the (key, gen) fields, i.e. the key
  // was produced by MakeKey and was read back intact.
  static bool IsValidKey(Key k) {
    return hash(k) == (HashNumbers(key(k), gen(k)) & 0xff);
  }

  // Pick a seek target: ~10% beginning, ~10% past-the-end sentinel,
  // otherwise generation 0 of a random key slot.
  static Key RandomTarget(Random* rnd) {
    switch (rnd->Next() % 10) {
      case 0:
        // Seek to beginning
        return MakeKey(0, 0);
      case 1:
        // Seek to end
        return MakeKey(K, 0);
      default:
        // Seek to middle
        return MakeKey(rnd->Next() % K, 0);
    }
  }

  // Per-key generation counters.  Set() publishes with release so that a
  // reader's acquire Get() sees every list insertion that happened before.
  struct State {
    std::atomic<int> generation[K];
    void Set(int k, int v) {
      generation[k].store(v, std::memory_order_release);
    }
    int Get(int k) { return generation[k].load(std::memory_order_acquire); }
    State() {
      for (unsigned int k = 0; k < K; k++) {
        Set(k, 0);
      }
    }
  };

  // Current state of the test
  State current_;

  ConcurrentArena arena_;

  // InlineSkipList is not protected by mu_. We just use a single writer
  // thread to modify it.
  InlineSkipList<TestComparator> list_;

 public:
  ConcurrentTest() : list_(TestComparator(), &arena_) {}

  // Insert the next generation of a random key slot.
  // REQUIRES: No concurrent calls to WriteStep or ConcurrentWriteStep
  void WriteStep(Random* rnd) {
    const uint32_t k = rnd->Next() % K;
    const int g = current_.Get(k) + 1;
    const Key new_key = MakeKey(k, g);
    char* buf = list_.AllocateKey(sizeof(Key));
    memcpy(buf, &new_key, sizeof(Key));
    list_.Insert(buf);
    // Publish the new generation only after the key is in the list, so
    // readers never expect a key that is not yet visible.
    current_.Set(k, g);
  }

  // Insert the next generation of slot k using the concurrent insert path.
  // REQUIRES: No concurrent calls for the same k
  void ConcurrentWriteStep(uint32_t k, bool use_hint = false) {
    const int g = current_.Get(k) + 1;
    const Key new_key = MakeKey(k, g);
    char* buf = list_.AllocateKey(sizeof(Key));
    memcpy(buf, &new_key, sizeof(Key));
    if (use_hint) {
      void* hint = nullptr;
      list_.InsertWithHintConcurrently(buf, &hint);
      // NOTE(review): assumes any hint storage handed back was allocated
      // with new[] by InsertWithHintConcurrently -- confirm in
      // inlineskiplist.h.
      delete[] reinterpret_cast<char*>(hint);
    } else {
      list_.InsertConcurrently(buf);
    }
    // No other writer touches slot k, so the generation must be unchanged.
    ASSERT_EQ(g, current_.Get(k) + 1);
    current_.Set(k, g);
  }

  // One reader pass: snapshot the committed generations, then walk the list
  // with random Next()/Seek() calls, verifying that every key that was
  // committed at snapshot time is observed (newer keys may also appear).
  void ReadStep(Random* rnd) {
    // Remember the initial committed state of the skiplist.
    State initial_state;
    for (unsigned int k = 0; k < K; k++) {
      initial_state.Set(k, current_.Get(k));
    }
    Key pos = RandomTarget(rnd);
    InlineSkipList<TestComparator>::Iterator iter(&list_);
    iter.Seek(Encode(&pos));
    while (true) {
      Key current;
      if (!iter.Valid()) {
        // Treat end-of-list as a sentinel one slot past the last key.
        current = MakeKey(K, 0);
      } else {
        current = Decode(iter.key());
        ASSERT_TRUE(IsValidKey(current)) << current;
      }
      ASSERT_LE(pos, current) << "should not go backwards";
      // Verify that everything in [pos,current) was not present in
      // initial_state.
      while (pos < current) {
        ASSERT_LT(key(pos), K) << pos;
        // Note that generation 0 is never inserted, so it is ok if
        // <*,0,*> is missing.
        ASSERT_TRUE((gen(pos) == 0U) ||
                    (gen(pos) > static_cast<uint64_t>(initial_state.Get(
                                    static_cast<int>(key(pos))))))
            << "key: " << key(pos) << "; gen: " << gen(pos)
            << "; initgen: " << initial_state.Get(static_cast<int>(key(pos)));
        // Advance to next key in the valid key space
        if (key(pos) < key(current)) {
          pos = MakeKey(key(pos) + 1, 0);
        } else {
          pos = MakeKey(key(pos), gen(pos) + 1);
        }
      }
      if (!iter.Valid()) {
        break;
      }
      // Randomly alternate between stepping forward and re-seeking to a
      // new (never smaller) target.
      if (rnd->Next() % 2) {
        iter.Next();
        pos = MakeKey(key(pos), gen(pos) + 1);
      } else {
        Key new_target = RandomTarget(rnd);
        if (new_target > pos) {
          pos = new_target;
          iter.Seek(Encode(&new_target));
        }
      }
    }
  }
};
// Out-of-class definition required for ODR-use of K before C++17.
const uint32_t ConcurrentTest::K;
  438. // Simple test that does single-threaded testing of the ConcurrentTest
  439. // scaffolding.
  440. TEST_F(InlineSkipTest, ConcurrentReadWithoutThreads) {
  441. ConcurrentTest test;
  442. Random rnd(test::RandomSeed());
  443. for (int i = 0; i < 10000; i++) {
  444. test.ReadStep(&rnd);
  445. test.WriteStep(&rnd);
  446. }
  447. }
  448. TEST_F(InlineSkipTest, ConcurrentInsertWithoutThreads) {
  449. ConcurrentTest test;
  450. Random rnd(test::RandomSeed());
  451. for (int i = 0; i < 10000; i++) {
  452. test.ReadStep(&rnd);
  453. uint32_t base = rnd.Next();
  454. for (int j = 0; j < 4; ++j) {
  455. test.ConcurrentWriteStep((base + j) % ConcurrentTest::K);
  456. }
  457. }
  458. }
  459. class TestState {
  460. public:
  461. ConcurrentTest t_;
  462. bool use_hint_;
  463. int seed_;
  464. std::atomic<bool> quit_flag_;
  465. std::atomic<uint32_t> next_writer_;
  466. enum ReaderState { STARTING, RUNNING, DONE };
  467. explicit TestState(int s)
  468. : seed_(s),
  469. quit_flag_(false),
  470. state_(STARTING),
  471. pending_writers_(0),
  472. state_cv_(&mu_) {}
  473. void Wait(ReaderState s) {
  474. mu_.Lock();
  475. while (state_ != s) {
  476. state_cv_.Wait();
  477. }
  478. mu_.Unlock();
  479. }
  480. void Change(ReaderState s) {
  481. mu_.Lock();
  482. state_ = s;
  483. state_cv_.Signal();
  484. mu_.Unlock();
  485. }
  486. void AdjustPendingWriters(int delta) {
  487. mu_.Lock();
  488. pending_writers_ += delta;
  489. if (pending_writers_ == 0) {
  490. state_cv_.Signal();
  491. }
  492. mu_.Unlock();
  493. }
  494. void WaitForPendingWriters() {
  495. mu_.Lock();
  496. while (pending_writers_ != 0) {
  497. state_cv_.Wait();
  498. }
  499. mu_.Unlock();
  500. }
  501. private:
  502. port::Mutex mu_;
  503. ReaderState state_;
  504. int pending_writers_;
  505. port::CondVar state_cv_;
  506. };
  507. static void ConcurrentReader(void* arg) {
  508. TestState* state = reinterpret_cast<TestState*>(arg);
  509. Random rnd(state->seed_);
  510. int64_t reads = 0;
  511. state->Change(TestState::RUNNING);
  512. while (!state->quit_flag_.load(std::memory_order_acquire)) {
  513. state->t_.ReadStep(&rnd);
  514. ++reads;
  515. }
  516. state->Change(TestState::DONE);
  517. }
  518. static void ConcurrentWriter(void* arg) {
  519. TestState* state = reinterpret_cast<TestState*>(arg);
  520. uint32_t k = state->next_writer_++ % ConcurrentTest::K;
  521. state->t_.ConcurrentWriteStep(k, state->use_hint_);
  522. state->AdjustPendingWriters(-1);
  523. }
  524. static void RunConcurrentRead(int run) {
  525. const int seed = test::RandomSeed() + (run * 100);
  526. Random rnd(seed);
  527. const int N = 1000;
  528. const int kSize = 1000;
  529. for (int i = 0; i < N; i++) {
  530. if ((i % 100) == 0) {
  531. fprintf(stderr, "Run %d of %d\n", i, N);
  532. }
  533. TestState state(seed + 1);
  534. Env::Default()->SetBackgroundThreads(1);
  535. Env::Default()->Schedule(ConcurrentReader, &state);
  536. state.Wait(TestState::RUNNING);
  537. for (int k = 0; k < kSize; ++k) {
  538. state.t_.WriteStep(&rnd);
  539. }
  540. state.quit_flag_.store(true, std::memory_order_release);
  541. state.Wait(TestState::DONE);
  542. }
  543. }
  544. static void RunConcurrentInsert(int run, bool use_hint = false,
  545. int write_parallelism = 4) {
  546. Env::Default()->SetBackgroundThreads(1 + write_parallelism,
  547. Env::Priority::LOW);
  548. const int seed = test::RandomSeed() + (run * 100);
  549. Random rnd(seed);
  550. const int N = 1000;
  551. const int kSize = 1000;
  552. for (int i = 0; i < N; i++) {
  553. if ((i % 100) == 0) {
  554. fprintf(stderr, "Run %d of %d\n", i, N);
  555. }
  556. TestState state(seed + 1);
  557. state.use_hint_ = use_hint;
  558. Env::Default()->Schedule(ConcurrentReader, &state);
  559. state.Wait(TestState::RUNNING);
  560. for (int k = 0; k < kSize; k += write_parallelism) {
  561. state.next_writer_ = rnd.Next();
  562. state.AdjustPendingWriters(write_parallelism);
  563. for (int p = 0; p < write_parallelism; ++p) {
  564. Env::Default()->Schedule(ConcurrentWriter, &state);
  565. }
  566. state.WaitForPendingWriters();
  567. }
  568. state.quit_flag_.store(true, std::memory_order_release);
  569. state.Wait(TestState::DONE);
  570. }
  571. }
// Run the single-writer / concurrent-reader scenario with several seeds.
TEST_F(InlineSkipTest, ConcurrentRead1) { RunConcurrentRead(1); }
TEST_F(InlineSkipTest, ConcurrentRead2) { RunConcurrentRead(2); }
TEST_F(InlineSkipTest, ConcurrentRead3) { RunConcurrentRead(3); }
TEST_F(InlineSkipTest, ConcurrentRead4) { RunConcurrentRead(4); }
TEST_F(InlineSkipTest, ConcurrentRead5) { RunConcurrentRead(5); }
// Run the concurrent-insert scenario (unhinted path) with several seeds.
TEST_F(InlineSkipTest, ConcurrentInsert1) { RunConcurrentInsert(1); }
TEST_F(InlineSkipTest, ConcurrentInsert2) { RunConcurrentInsert(2); }
TEST_F(InlineSkipTest, ConcurrentInsert3) { RunConcurrentInsert(3); }
// Same scenario, exercising InsertWithHintConcurrently instead.
TEST_F(InlineSkipTest, ConcurrentInsertWithHint1) {
  RunConcurrentInsert(1, true);
}
TEST_F(InlineSkipTest, ConcurrentInsertWithHint2) {
  RunConcurrentInsert(2, true);
}
TEST_F(InlineSkipTest, ConcurrentInsertWithHint3) {
  RunConcurrentInsert(3, true);
}
  589. #endif // ROCKSDB_VALGRIND_RUN
  590. } // namespace ROCKSDB_NAMESPACE
// Standard gtest entry point: initialize the framework and run every test
// registered above.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}