// inlineskiplist_test.cc
  1. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
  2. // This source code is licensed under both the GPLv2 (found in the
  3. // COPYING file in the root directory) and Apache 2.0 License
  4. // (found in the LICENSE.Apache file in the root directory).
  5. //
  6. // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
  7. // Use of this source code is governed by a BSD-style license that can be
  8. // found in the LICENSE file. See the AUTHORS file for names of contributors.
  9. #include "memtable/inlineskiplist.h"
  10. #include <set>
  11. #include <unordered_set>
  12. #include "memory/concurrent_arena.h"
  13. #include "rocksdb/env.h"
  14. #include "test_util/testharness.h"
  15. #include "util/hash.h"
  16. #include "util/random.h"
  17. namespace ROCKSDB_NAMESPACE {
  18. // Our test skip list stores 8-byte unsigned integers
  19. using Key = uint64_t;
  20. static const char* Encode(const uint64_t* key) {
  21. return reinterpret_cast<const char*>(key);
  22. }
  23. static Key Decode(const char* key) {
  24. Key rv;
  25. memcpy(&rv, key, sizeof(Key));
  26. return rv;
  27. }
  28. struct TestComparator {
  29. using DecodedType = Key;
  30. static DecodedType decode_key(const char* b) { return Decode(b); }
  31. int operator()(const char* a, const char* b) const {
  32. if (Decode(a) < Decode(b)) {
  33. return -1;
  34. } else if (Decode(a) > Decode(b)) {
  35. return +1;
  36. } else {
  37. return 0;
  38. }
  39. }
  40. int operator()(const char* a, const DecodedType b) const {
  41. if (Decode(a) < b) {
  42. return -1;
  43. } else if (Decode(a) > b) {
  44. return +1;
  45. } else {
  46. return 0;
  47. }
  48. }
  49. };
  50. using TestInlineSkipList = InlineSkipList<TestComparator>;
// Test fixture: insertion helpers that mirror every inserted key into a
// std::set<Key> model, plus Validate() which checks the skip list's
// contents and ordering against that model.
class InlineSkipTest : public testing::Test {
 public:
  // Insert `key` into `list` and record it in the model set.
  void Insert(TestInlineSkipList* list, Key key) {
    char* buf = list->AllocateKey(sizeof(Key));
    memcpy(buf, &key, sizeof(Key));
    list->Insert(buf);
    keys_.insert(key);
  }

  // Insert `key` through the hint-based API and record it in the model
  // set. Returns whatever InsertWithHint reported.
  bool InsertWithHint(TestInlineSkipList* list, Key key, void** hint) {
    char* buf = list->AllocateKey(sizeof(Key));
    memcpy(buf, &key, sizeof(Key));
    bool res = list->InsertWithHint(buf, hint);
    keys_.insert(key);
    return res;
  }

  // Check that `list` contains exactly the keys in the model, in sorted
  // order, and that the list's internal structure passes TEST_Validate().
  void Validate(TestInlineSkipList* list) {
    // Check keys exist.
    for (Key key : keys_) {
      ASSERT_TRUE(list->Contains(Encode(&key)));
    }
    // Iterate over the list, make sure keys appears in order and no extra
    // keys exist.
    TestInlineSkipList::Iterator iter(list);
    ASSERT_FALSE(iter.Valid());
    // Seek to 0 positions the iterator on the smallest key (keys_ is a
    // sorted set, so iteration orders must match exactly).
    Key zero = 0;
    iter.Seek(Encode(&zero));
    for (Key key : keys_) {
      ASSERT_TRUE(iter.Valid());
      ASSERT_EQ(key, Decode(iter.key()));
      iter.Next();
    }
    ASSERT_FALSE(iter.Valid());
    // Validate the list is well-formed.
    list->TEST_Validate();
  }

 private:
  // Model: every key ever inserted through the helpers above.
  std::set<Key> keys_;
};
  89. TEST_F(InlineSkipTest, Empty) {
  90. Arena arena;
  91. TestComparator cmp;
  92. InlineSkipList<TestComparator> list(cmp, &arena);
  93. Key key = 10;
  94. ASSERT_TRUE(!list.Contains(Encode(&key)));
  95. InlineSkipList<TestComparator>::Iterator iter(&list);
  96. ASSERT_TRUE(!iter.Valid());
  97. iter.SeekToFirst();
  98. ASSERT_TRUE(!iter.Valid());
  99. key = 100;
  100. iter.Seek(Encode(&key));
  101. ASSERT_TRUE(!iter.Valid());
  102. iter.SeekForPrev(Encode(&key));
  103. ASSERT_TRUE(!iter.Valid());
  104. iter.SeekToLast();
  105. ASSERT_TRUE(!iter.Valid());
  106. }
// Insert up to N random keys drawn from [0, R) and verify Contains(),
// basic iterator positioning, and short forward/backward scans against a
// std::set<Key> model.
TEST_F(InlineSkipTest, InsertAndLookup) {
  const int N = 2000;
  const int R = 5000;
  Random rnd(1000);
  std::set<Key> keys;
  ConcurrentArena arena;
  TestComparator cmp;
  InlineSkipList<TestComparator> list(cmp, &arena);
  for (int i = 0; i < N; i++) {
    Key key = rnd.Next() % R;
    // Only insert keys that have not been inserted before.
    if (keys.insert(key).second) {
      char* buf = list.AllocateKey(sizeof(Key));
      memcpy(buf, &key, sizeof(Key));
      list.Insert(buf);
    }
  }
  // Contains() must agree with the model for every key in the range.
  for (Key i = 0; i < R; i++) {
    if (list.Contains(Encode(&i))) {
      ASSERT_EQ(keys.count(i), 1U);
    } else {
      ASSERT_EQ(keys.count(i), 0U);
    }
  }
  // Simple iterator tests
  {
    InlineSkipList<TestComparator>::Iterator iter(&list);
    ASSERT_TRUE(!iter.Valid());
    // Seeking to 0 lands on the smallest inserted key.
    uint64_t zero = 0;
    iter.Seek(Encode(&zero));
    ASSERT_TRUE(iter.Valid());
    ASSERT_EQ(*(keys.begin()), Decode(iter.key()));
    // SeekForPrev from the largest possible key lands on the largest
    // inserted key.
    uint64_t max_key = R - 1;
    iter.SeekForPrev(Encode(&max_key));
    ASSERT_TRUE(iter.Valid());
    ASSERT_EQ(*(keys.rbegin()), Decode(iter.key()));
    iter.SeekToFirst();
    ASSERT_TRUE(iter.Valid());
    ASSERT_EQ(*(keys.begin()), Decode(iter.key()));
    iter.SeekToLast();
    ASSERT_TRUE(iter.Valid());
    ASSERT_EQ(*(keys.rbegin()), Decode(iter.key()));
  }
  // Forward iteration test
  for (Key i = 0; i < R; i++) {
    InlineSkipList<TestComparator>::Iterator iter(&list);
    iter.Seek(Encode(&i));
    // Compare against model iterator
    std::set<Key>::iterator model_iter = keys.lower_bound(i);
    // Only check the first 3 entries after each seek to keep this fast.
    for (int j = 0; j < 3; j++) {
      if (model_iter == keys.end()) {
        ASSERT_TRUE(!iter.Valid());
        break;
      } else {
        ASSERT_TRUE(iter.Valid());
        ASSERT_EQ(*model_iter, Decode(iter.key()));
        ++model_iter;
        iter.Next();
      }
    }
  }
  // Backward iteration test
  for (Key i = 0; i < R; i++) {
    InlineSkipList<TestComparator>::Iterator iter(&list);
    iter.SeekForPrev(Encode(&i));
    // Compare against model iterator
    std::set<Key>::iterator model_iter = keys.upper_bound(i);
    for (int j = 0; j < 3; j++) {
      if (model_iter == keys.begin()) {
        ASSERT_TRUE(!iter.Valid());
        break;
      } else {
        ASSERT_TRUE(iter.Valid());
        ASSERT_EQ(*--model_iter, Decode(iter.key()));
        iter.Prev();
      }
    }
  }
}
  185. TEST_F(InlineSkipTest, InsertWithHint_Sequential) {
  186. const int N = 100000;
  187. Arena arena;
  188. TestComparator cmp;
  189. TestInlineSkipList list(cmp, &arena);
  190. void* hint = nullptr;
  191. for (int i = 0; i < N; i++) {
  192. Key key = i;
  193. InsertWithHint(&list, key, &hint);
  194. }
  195. Validate(&list);
  196. }
  197. TEST_F(InlineSkipTest, InsertWithHint_MultipleHints) {
  198. const int N = 100000;
  199. const int S = 100;
  200. Random rnd(534);
  201. Arena arena;
  202. TestComparator cmp;
  203. TestInlineSkipList list(cmp, &arena);
  204. void* hints[S];
  205. Key last_key[S];
  206. for (int i = 0; i < S; i++) {
  207. hints[i] = nullptr;
  208. last_key[i] = 0;
  209. }
  210. for (int i = 0; i < N; i++) {
  211. Key s = rnd.Uniform(S);
  212. Key key = (s << 32) + (++last_key[s]);
  213. InsertWithHint(&list, key, &hints[s]);
  214. }
  215. Validate(&list);
  216. }
  217. TEST_F(InlineSkipTest, InsertWithHint_MultipleHintsRandom) {
  218. const int N = 100000;
  219. const int S = 100;
  220. Random rnd(534);
  221. Arena arena;
  222. TestComparator cmp;
  223. TestInlineSkipList list(cmp, &arena);
  224. void* hints[S];
  225. for (int i = 0; i < S; i++) {
  226. hints[i] = nullptr;
  227. }
  228. for (int i = 0; i < N; i++) {
  229. Key s = rnd.Uniform(S);
  230. Key key = (s << 32) + rnd.Next();
  231. InsertWithHint(&list, key, &hints[s]);
  232. }
  233. Validate(&list);
  234. }
// Mix hinted and unhinted insertions into the same list and verify the
// result. Each stream claims a distinct random 64-bit value; its low
// 32 bits become the key prefix after the shift below.
TEST_F(InlineSkipTest, InsertWithHint_CompatibleWithInsertWithoutHint) {
  const int N = 100000;
  const int S1 = 100;  // number of hinted streams
  const int S2 = 100;  // number of unhinted streams
  Random rnd(534);
  Arena arena;
  TestComparator cmp;
  TestInlineSkipList list(cmp, &arena);
  std::unordered_set<Key> used;  // stream ids already claimed
  Key with_hint[S1];
  Key without_hint[S2];
  void* hints[S1];
  // Draw a distinct random id for every hinted stream.
  for (int i = 0; i < S1; i++) {
    hints[i] = nullptr;
    while (true) {
      Key s = rnd.Next();
      if (used.insert(s).second) {
        with_hint[i] = s;
        break;
      }
    }
  }
  // And for every unhinted stream.
  for (int i = 0; i < S2; i++) {
    while (true) {
      Key s = rnd.Next();
      if (used.insert(s).second) {
        without_hint[i] = s;
        break;
      }
    }
  }
  for (int i = 0; i < N; i++) {
    // Pick a random stream; the first S1 insert with a hint, the rest
    // insert without one.
    Key s = rnd.Uniform(S1 + S2);
    if (s < S1) {
      Key key = (with_hint[s] << 32) + rnd.Next();
      InsertWithHint(&list, key, &hints[s]);
    } else {
      Key key = (without_hint[s - S1] << 32) + rnd.Next();
      Insert(&list, key);
    }
  }
  Validate(&list);
}
  278. #if !defined(ROCKSDB_VALGRIND_RUN) || defined(ROCKSDB_FULL_VALGRIND_RUN)
  279. // We want to make sure that with a single writer and multiple
  280. // concurrent readers (with no synchronization other than when a
  281. // reader's iterator is created), the reader always observes all the
  282. // data that was present in the skip list when the iterator was
  283. // constructor. Because insertions are happening concurrently, we may
  284. // also observe new values that were inserted since the iterator was
  285. // constructed, but we should never miss any values that were present
  286. // at iterator construction time.
  287. //
  288. // We generate multi-part keys:
  289. // <key,gen,hash>
  290. // where:
  291. // key is in range [0..K-1]
  292. // gen is a generation number for key
  293. // hash is hash(key,gen)
  294. //
  295. // The insertion code picks a random key, sets gen to be 1 + the last
  296. // generation number inserted for that key, and sets hash to Hash(key,gen).
  297. //
  298. // At the beginning of a read, we snapshot the last inserted
  299. // generation number for each key. We then iterate, including random
  300. // calls to Next() and Seek(). For every key we encounter, we
  301. // check that it is either expected given the initial snapshot or has
  302. // been concurrently added since the iterator started.
// Scaffolding for the single-writer / multi-reader torture tests described
// in the comment above.
class ConcurrentTest {
 public:
  static const uint32_t K = 8;

 private:
  // Key layout (packed into a uint64_t): <key:24 bits><gen:32 bits><hash:8 bits>.
  static uint64_t key(Key key) { return (key >> 40); }
  static uint64_t gen(Key key) { return (key >> 8) & 0xffffffffu; }
  static uint64_t hash(Key key) { return key & 0xff; }

  // Hash of the (key, gen) pair, used as an 8-bit checksum in MakeKey.
  static uint64_t HashNumbers(uint64_t k, uint64_t g) {
    uint64_t data[2] = {k, g};
    return Hash(reinterpret_cast<char*>(data), sizeof(data), 0);
  }

  // Pack (k, g) plus the 8-bit checksum into a Key.
  static Key MakeKey(uint64_t k, uint64_t g) {
    assert(sizeof(Key) == sizeof(uint64_t));
    assert(k <= K);  // We sometimes pass K to seek to the end of the skiplist
    assert(g <= 0xffffffffu);
    return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff));
  }

  // A key is valid iff its embedded checksum matches its (key, gen) parts;
  // detects torn or corrupt reads.
  static bool IsValidKey(Key k) {
    return hash(k) == (HashNumbers(key(k), gen(k)) & 0xff);
  }

  // Produce a seek target: beginning (10%), end (10%), or a random middle
  // key (80%).
  static Key RandomTarget(Random* rnd) {
    switch (rnd->Next() % 10) {
      case 0:
        // Seek to beginning
        return MakeKey(0, 0);
      case 1:
        // Seek to end
        return MakeKey(K, 0);
      default:
        // Seek to middle
        return MakeKey(rnd->Next() % K, 0);
    }
  }

  // Per-key generation
  struct State {
    std::atomic<int> generation[K];
    // Release store pairs with the acquire load in Get() so that a reader
    // that observes a generation also observes the insertion before it.
    void Set(int k, int v) {
      generation[k].store(v, std::memory_order_release);
    }
    int Get(int k) { return generation[k].load(std::memory_order_acquire); }
    State() {
      for (unsigned int k = 0; k < K; k++) {
        Set(k, 0);
      }
    }
  };

  // Current state of the test
  State current_;
  ConcurrentArena arena_;
  // InlineSkipList is not protected by mu_. We just use a single writer
  // thread to modify it.
  InlineSkipList<TestComparator> list_;

 public:
  ConcurrentTest() : list_(TestComparator(), &arena_) {}

  // REQUIRES: No concurrent calls to WriteStep or ConcurrentWriteStep
  void WriteStep(Random* rnd) {
    const uint32_t k = rnd->Next() % K;
    const int g = current_.Get(k) + 1;
    const Key new_key = MakeKey(k, g);
    char* buf = list_.AllocateKey(sizeof(Key));
    memcpy(buf, &new_key, sizeof(Key));
    list_.Insert(buf);
    // Publish the new generation only after the key is in the list.
    current_.Set(k, g);
  }

  // REQUIRES: No concurrent calls for the same k
  void ConcurrentWriteStep(uint32_t k, bool use_hint = false) {
    const int g = current_.Get(k) + 1;
    const Key new_key = MakeKey(k, g);
    char* buf = list_.AllocateKey(sizeof(Key));
    memcpy(buf, &new_key, sizeof(Key));
    if (use_hint) {
      void* hint = nullptr;
      list_.InsertWithHintConcurrently(buf, &hint);
      // NOTE(review): assumes the hint storage is new[]-allocated by
      // InsertWithHintConcurrently — confirm against InlineSkipList.
      delete[] reinterpret_cast<char*>(hint);
    } else {
      list_.InsertConcurrently(buf);
    }
    // No other writer touches key k concurrently, so the generation must
    // still be the one we read above.
    ASSERT_EQ(g, current_.Get(k) + 1);
    current_.Set(k, g);
  }

  // One reader pass: snapshot the committed generations, then walk the
  // list with random Next()/Seek() calls, checking that every key skipped
  // over was absent from the snapshot and every key seen has a valid
  // checksum.
  void ReadStep(Random* rnd) {
    // Remember the initial committed state of the skiplist.
    State initial_state;
    for (unsigned int k = 0; k < K; k++) {
      initial_state.Set(k, current_.Get(k));
    }
    Key pos = RandomTarget(rnd);
    InlineSkipList<TestComparator>::Iterator iter(&list_);
    iter.Seek(Encode(&pos));
    while (true) {
      Key current;
      if (!iter.Valid()) {
        // Past the end: use <K, 0> as a sentinel larger than any real key.
        current = MakeKey(K, 0);
      } else {
        current = Decode(iter.key());
        ASSERT_TRUE(IsValidKey(current)) << current;
      }
      ASSERT_LE(pos, current) << "should not go backwards";
      // Verify that everything in [pos,current) was not present in
      // initial_state.
      while (pos < current) {
        ASSERT_LT(key(pos), K) << pos;
        // Note that generation 0 is never inserted, so it is ok if
        // <*,0,*> is missing.
        ASSERT_TRUE((gen(pos) == 0U) ||
                    (gen(pos) > static_cast<uint64_t>(initial_state.Get(
                                    static_cast<int>(key(pos))))))
            << "key: " << key(pos) << "; gen: " << gen(pos)
            << "; initgen: " << initial_state.Get(static_cast<int>(key(pos)));
        // Advance to next key in the valid key space
        if (key(pos) < key(current)) {
          pos = MakeKey(key(pos) + 1, 0);
        } else {
          pos = MakeKey(key(pos), gen(pos) + 1);
        }
      }
      if (!iter.Valid()) {
        break;
      }
      if (rnd->Next() % 2) {
        iter.Next();
        pos = MakeKey(key(pos), gen(pos) + 1);
      } else {
        // Occasionally jump forward to a new random target (never
        // backwards, to keep the no-going-backwards invariant checkable).
        Key new_target = RandomTarget(rnd);
        if (new_target > pos) {
          pos = new_target;
          iter.Seek(Encode(&new_target));
        }
      }
    }
  }
};
  435. const uint32_t ConcurrentTest::K;
  436. // Simple test that does single-threaded testing of the ConcurrentTest
  437. // scaffolding.
  438. TEST_F(InlineSkipTest, ConcurrentReadWithoutThreads) {
  439. ConcurrentTest test;
  440. Random rnd(test::RandomSeed());
  441. for (int i = 0; i < 10000; i++) {
  442. test.ReadStep(&rnd);
  443. test.WriteStep(&rnd);
  444. }
  445. }
  446. TEST_F(InlineSkipTest, ConcurrentInsertWithoutThreads) {
  447. ConcurrentTest test;
  448. Random rnd(test::RandomSeed());
  449. for (int i = 0; i < 10000; i++) {
  450. test.ReadStep(&rnd);
  451. uint32_t base = rnd.Next();
  452. for (int j = 0; j < 4; ++j) {
  453. test.ConcurrentWriteStep((base + j) % ConcurrentTest::K);
  454. }
  455. }
  456. }
  457. class TestState {
  458. public:
  459. ConcurrentTest t_;
  460. bool use_hint_;
  461. int seed_;
  462. std::atomic<bool> quit_flag_;
  463. std::atomic<uint32_t> next_writer_;
  464. enum ReaderState { STARTING, RUNNING, DONE };
  465. explicit TestState(int s)
  466. : seed_(s),
  467. quit_flag_(false),
  468. state_(STARTING),
  469. pending_writers_(0),
  470. state_cv_(&mu_) {}
  471. void Wait(ReaderState s) {
  472. mu_.Lock();
  473. while (state_ != s) {
  474. state_cv_.Wait();
  475. }
  476. mu_.Unlock();
  477. }
  478. void Change(ReaderState s) {
  479. mu_.Lock();
  480. state_ = s;
  481. state_cv_.Signal();
  482. mu_.Unlock();
  483. }
  484. void AdjustPendingWriters(int delta) {
  485. mu_.Lock();
  486. pending_writers_ += delta;
  487. if (pending_writers_ == 0) {
  488. state_cv_.Signal();
  489. }
  490. mu_.Unlock();
  491. }
  492. void WaitForPendingWriters() {
  493. mu_.Lock();
  494. while (pending_writers_ != 0) {
  495. state_cv_.Wait();
  496. }
  497. mu_.Unlock();
  498. }
  499. private:
  500. port::Mutex mu_;
  501. ReaderState state_;
  502. int pending_writers_;
  503. port::CondVar state_cv_;
  504. };
  505. static void ConcurrentReader(void* arg) {
  506. TestState* state = static_cast<TestState*>(arg);
  507. Random rnd(state->seed_);
  508. int64_t reads = 0;
  509. state->Change(TestState::RUNNING);
  510. while (!state->quit_flag_.load(std::memory_order_acquire)) {
  511. state->t_.ReadStep(&rnd);
  512. ++reads;
  513. }
  514. (void)reads;
  515. state->Change(TestState::DONE);
  516. }
  517. static void ConcurrentWriter(void* arg) {
  518. TestState* state = static_cast<TestState*>(arg);
  519. uint32_t k = state->next_writer_++ % ConcurrentTest::K;
  520. state->t_.ConcurrentWriteStep(k, state->use_hint_);
  521. state->AdjustPendingWriters(-1);
  522. }
// Drive one background reader thread while the calling thread performs
// kSize writes per round, for N rounds. `run` only perturbs the seed.
static void RunConcurrentRead(int run) {
  const int seed = test::RandomSeed() + (run * 100);
  Random rnd(seed);
  const int N = 1000;
  const int kSize = 1000;
  for (int i = 0; i < N; i++) {
    if ((i % 100) == 0) {
      fprintf(stderr, "Run %d of %d\n", i, N);
    }
    TestState state(seed + 1);
    Env::Default()->SetBackgroundThreads(1);
    Env::Default()->Schedule(ConcurrentReader, &state);
    // Don't start writing until the reader thread is actually running.
    state.Wait(TestState::RUNNING);
    // Single writer: this thread mutates the list while the reader runs.
    for (int k = 0; k < kSize; ++k) {
      state.t_.WriteStep(&rnd);
    }
    state.quit_flag_.store(true, std::memory_order_release);
    state.Wait(TestState::DONE);
  }
}
// Like RunConcurrentRead, but writes are done by `write_parallelism`
// background writer threads in batches, optionally using insertion hints.
static void RunConcurrentInsert(int run, bool use_hint = false,
                                int write_parallelism = 4) {
  // One reader thread plus the writer pool.
  Env::Default()->SetBackgroundThreads(1 + write_parallelism,
                                       Env::Priority::LOW);
  const int seed = test::RandomSeed() + (run * 100);
  Random rnd(seed);
  const int N = 1000;
  const int kSize = 1000;
  for (int i = 0; i < N; i++) {
    if ((i % 100) == 0) {
      fprintf(stderr, "Run %d of %d\n", i, N);
    }
    TestState state(seed + 1);
    state.use_hint_ = use_hint;
    Env::Default()->Schedule(ConcurrentReader, &state);
    // Don't start writing until the reader thread is actually running.
    state.Wait(TestState::RUNNING);
    for (int k = 0; k < kSize; k += write_parallelism) {
      // Randomize which key slots this batch of writers will touch.
      state.next_writer_ = rnd.Next();
      state.AdjustPendingWriters(write_parallelism);
      for (int p = 0; p < write_parallelism; ++p) {
        Env::Default()->Schedule(ConcurrentWriter, &state);
      }
      // Let the whole batch retire before scheduling the next one.
      state.WaitForPendingWriters();
    }
    state.quit_flag_.store(true, std::memory_order_release);
    state.Wait(TestState::DONE);
  }
}
// Run each concurrency scenario several times with different seeds.
TEST_F(InlineSkipTest, ConcurrentRead1) { RunConcurrentRead(1); }
TEST_F(InlineSkipTest, ConcurrentRead2) { RunConcurrentRead(2); }
TEST_F(InlineSkipTest, ConcurrentRead3) { RunConcurrentRead(3); }
TEST_F(InlineSkipTest, ConcurrentRead4) { RunConcurrentRead(4); }
TEST_F(InlineSkipTest, ConcurrentRead5) { RunConcurrentRead(5); }
TEST_F(InlineSkipTest, ConcurrentInsert1) { RunConcurrentInsert(1); }
TEST_F(InlineSkipTest, ConcurrentInsert2) { RunConcurrentInsert(2); }
TEST_F(InlineSkipTest, ConcurrentInsert3) { RunConcurrentInsert(3); }
// Same scenarios, but writers use the hint-based concurrent insert.
TEST_F(InlineSkipTest, ConcurrentInsertWithHint1) {
  RunConcurrentInsert(1, true);
}
TEST_F(InlineSkipTest, ConcurrentInsertWithHint2) {
  RunConcurrentInsert(2, true);
}
TEST_F(InlineSkipTest, ConcurrentInsertWithHint3) {
  RunConcurrentInsert(3, true);
}
  588. #endif // !defined(ROCKSDB_VALGRIND_RUN) || defined(ROCKSDB_FULL_VALGRIND_RUN)
  589. } // namespace ROCKSDB_NAMESPACE
int main(int argc, char** argv) {
  // Print a stack trace on crashes to ease debugging the concurrency tests.
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}