#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

import os
import random
import sys

from block_cache_pysim import (
    ARCCache,
    CacheEntry,
    GDSizeCache,
    HashTable,
    HyperbolicPolicy,
    LFUPolicy,
    LinUCBCache,
    LRUCache,
    LRUPolicy,
    MRUPolicy,
    OPTCache,
    OPTCacheEntry,
    ThompsonSamplingCache,
    TraceCache,
    TraceRecord,
    create_cache,
    kMicrosInSecond,
    kSampleSize,
    run,
)
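

# Exercise the custom HashTable: bulk insert/lookup/delete, a randomized mix of
# inserts and deletes checked against a plain dict as ground truth, and
# uniqueness of the keys returned by random_sample().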
def test_hash_table():
    print("Test hash table")
    table = HashTable()
    data_size = 10000
    for i in range(data_size):
        table.insert("k{}".format(i), i, "v{}".format(i))
    for i in range(data_size):
        assert table.lookup("k{}".format(i), i) is not None
    for i in range(data_size):
        table.delete("k{}".format(i), i)
    for i in range(data_size):
        assert table.lookup("k{}".format(i), i) is None

    truth_map = {}
    n = 1000000
    records = 100
    for i in range(n):
        key_id = random.randint(0, records)
        v = random.randint(0, records)
        key = "k{}".format(key_id)
        value = CacheEntry(v, v, v, v, v, v, v)
        action = random.randint(0, 10)
        assert len(truth_map) == table.elements, "{} {} {}".format(
            len(truth_map), table.elements, i
        )
        if action <= 8:
            if key in truth_map:
                assert table.lookup(key, key_id) is not None
                assert truth_map[key].value_size == table.lookup(key, key_id).value_size
            else:
                assert table.lookup(key, key_id) is None
            table.insert(key, key_id, value)
            truth_map[key] = value
        else:
            deleted = table.delete(key, key_id)
            if deleted:
                assert key in truth_map
            if key in truth_map:
                del truth_map[key]

    # Check all keys are unique in the sample set.
    for _i in range(10):
        samples = table.random_sample(kSampleSize)
        unique_keys = {}
        for sample in samples:
            unique_keys[sample.key] = True
        assert len(samples) == len(unique_keys)

    assert len(table) == len(truth_map)
    for key in truth_map:
        assert table.lookup(key, int(key[1:])) is not None
        assert truth_map[key].value_size == table.lookup(key, int(key[1:])).value_size
    print("Test hash table: Success")
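

# assert_metrics checks a cache against expected_value, whose layout is:
# [used_size, num_accesses, num_misses,
#  ids of cached blocks (keyed "b<id>"), ids of cached get/row keys (keyed "g0-<id>")].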
def assert_metrics(cache, expected_value, expected_value_size=1, custom_hashtable=True):
    assert cache.used_size == expected_value[0], "Expected {}, Actual {}".format(
        expected_value[0], cache.used_size
    )
    assert (
        cache.miss_ratio_stats.num_accesses == expected_value[1]
    ), "Expected {}, Actual {}".format(
        expected_value[1], cache.miss_ratio_stats.num_accesses
    )
    assert (
        cache.miss_ratio_stats.num_misses == expected_value[2]
    ), "Expected {}, Actual {}".format(
        expected_value[2], cache.miss_ratio_stats.num_misses
    )
    assert len(cache.table) == len(expected_value[3]) + len(
        expected_value[4]
    ), "Expected {}, Actual {}".format(
        len(expected_value[3]) + len(expected_value[4]), cache.table.elements
    )
    for expected_k in expected_value[3]:
        if custom_hashtable:
            val = cache.table.lookup("b{}".format(expected_k), expected_k)
        else:
            val = cache.table["b{}".format(expected_k)]
        assert val is not None, "Expected {} Actual: Not Exist {}, Table: {}".format(
            expected_k, expected_value, cache.table
        )
        assert val.value_size == expected_value_size
    for expected_k in expected_value[4]:
        if custom_hashtable:
            val = cache.table.lookup("g0-{}".format(expected_k), expected_k)
        else:
            val = cache.table["g0-{}".format(expected_k)]
        assert val is not None
        assert val.value_size == expected_value_size


# Access k1, k1, k2, k3, k3, k3, k4
# When k4 is inserted,
# LRU should evict k1.
# LFU should evict k2.
# MRU should evict k3.
def test_cache(cache, expected_value, custom_hashtable=True):
    k1 = TraceRecord(
        access_time=0,
        block_id=1,
        block_type=1,
        block_size=1,
        cf_id=0,
        cf_name="",
        level=0,
        fd=0,
        caller=1,
        no_insert=0,
        get_id=1,
        key_id=1,
        kv_size=5,
        is_hit=1,
        referenced_key_exist_in_block=1,
        num_keys_in_block=0,
        table_id=0,
        seq_number=0,
        block_key_size=0,
        key_size=0,
        block_offset_in_file=0,
        next_access_seq_no=0,
    )
    k2 = TraceRecord(
        access_time=1,
        block_id=2,
        block_type=1,
        block_size=1,
        cf_id=0,
        cf_name="",
        level=0,
        fd=0,
        caller=1,
        no_insert=0,
        get_id=1,
        key_id=1,
        kv_size=5,
        is_hit=1,
        referenced_key_exist_in_block=1,
        num_keys_in_block=0,
        table_id=0,
        seq_number=0,
        block_key_size=0,
        key_size=0,
        block_offset_in_file=0,
        next_access_seq_no=0,
    )
    k3 = TraceRecord(
        access_time=2,
        block_id=3,
        block_type=1,
        block_size=1,
        cf_id=0,
        cf_name="",
        level=0,
        fd=0,
        caller=1,
        no_insert=0,
        get_id=1,
        key_id=1,
        kv_size=5,
        is_hit=1,
        referenced_key_exist_in_block=1,
        num_keys_in_block=0,
        table_id=0,
        seq_number=0,
        block_key_size=0,
        key_size=0,
        block_offset_in_file=0,
        next_access_seq_no=0,
    )
    k4 = TraceRecord(
        access_time=3,
        block_id=4,
        block_type=1,
        block_size=1,
        cf_id=0,
        cf_name="",
        level=0,
        fd=0,
        caller=1,
        no_insert=0,
        get_id=1,
        key_id=1,
        kv_size=5,
        is_hit=1,
        referenced_key_exist_in_block=1,
        num_keys_in_block=0,
        table_id=0,
        seq_number=0,
        block_key_size=0,
        key_size=0,
        block_offset_in_file=0,
        next_access_seq_no=0,
    )
    sequence = [k1, k1, k2, k3, k3, k3]
    index = 0
    expected_values = []
    # Access k1, miss.
    expected_values.append([1, 1, 1, [1], []])
    # Access k1, hit.
    expected_values.append([1, 2, 1, [1], []])
    # Access k2, miss.
    expected_values.append([2, 3, 2, [1, 2], []])
    # Access k3, miss.
    expected_values.append([3, 4, 3, [1, 2, 3], []])
    # Access k3, hit.
    expected_values.append([3, 5, 3, [1, 2, 3], []])
    # Access k3, hit.
    expected_values.append([3, 6, 3, [1, 2, 3], []])
    access_time = 0
    for access in sequence:
        access.access_time = access_time
        cache.access(access)
        assert_metrics(
            cache,
            expected_values[index],
            expected_value_size=1,
            custom_hashtable=custom_hashtable,
        )
        access_time += 1
        index += 1
    k4.access_time = access_time
    cache.access(k4)
    assert_metrics(
        cache, expected_value, expected_value_size=1, custom_hashtable=custom_hashtable
    )


def test_lru_cache(cache, custom_hashtable):
    print("Test LRU cache")
    # Access k4, miss. evict k1
    test_cache(cache, [3, 7, 4, [2, 3, 4], []], custom_hashtable)
    print("Test LRU cache: Success")


def test_mru_cache():
    print("Test MRU cache")
    policies = []
    policies.append(MRUPolicy())
    # Access k4, miss. evict k3
    test_cache(
        ThompsonSamplingCache(3, False, policies, cost_class_label=None),
        [3, 7, 4, [1, 2, 4], []],
    )
    print("Test MRU cache: Success")


def test_lfu_cache():
    print("Test LFU cache")
    policies = []
    policies.append(LFUPolicy())
    # Access k4, miss. evict k2
    test_cache(
        ThompsonSamplingCache(3, False, policies, cost_class_label=None),
        [3, 7, 4, [1, 3, 4], []],
    )
    print("Test LFU cache: Success")
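

# Drive the cache with a random workload of repeated keys and verify that its
# bookkeeping stays consistent: for simulated caches, used_size must equal the
# sum of cached value sizes; the Trace cache only replays the recorded
# hits/misses.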
def test_mix(cache):
    print("Test Mix {} cache".format(cache.cache_name()))
    n = 100000
    records = 100
    block_size_table = {}
    trace_num_misses = 0
    for i in range(n):
        key_id = random.randint(0, records)
        vs = random.randint(0, 10)
        now = i * kMicrosInSecond
        block_size = vs
        if key_id in block_size_table:
            block_size = block_size_table[key_id]
        else:
            block_size_table[key_id] = block_size
        is_hit = key_id % 2
        if is_hit == 0:
            trace_num_misses += 1
        k = TraceRecord(
            access_time=now,
            block_id=key_id,
            block_type=1,
            block_size=block_size,
            cf_id=0,
            cf_name="",
            level=0,
            fd=0,
            caller=1,
            no_insert=0,
            get_id=key_id,
            key_id=key_id,
            kv_size=5,
            is_hit=is_hit,
            referenced_key_exist_in_block=1,
            num_keys_in_block=0,
            table_id=0,
            seq_number=0,
            block_key_size=0,
            key_size=0,
            block_offset_in_file=0,
            next_access_seq_no=vs,
        )
        cache.access(k)
    assert cache.miss_ratio_stats.miss_ratio() > 0
    if cache.cache_name() == "Trace":
        assert cache.miss_ratio_stats.num_accesses == n
        assert cache.miss_ratio_stats.num_misses == trace_num_misses
    else:
        assert cache.used_size <= cache.cache_size
        all_values = cache.table.values()
        cached_size = 0
        for value in all_values:
            cached_size += value.value_size
        assert cached_size == cache.used_size, "Expected {} Actual {}".format(
            cache.used_size, cached_size
        )
    print("Test Mix {} cache: Success".format(cache.cache_name()))
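

# Write a synthetic trace file, run every cache simulator over it via run(),
# and check access counts, miss ratios, and size accounting.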
def test_end_to_end():
    print("Test All caches")
    n = 100000
    nblocks = 1000
    block_size = 16 * 1024
    ncfs = 7
    nlevels = 6
    nfds = 100000
    trace_file_path = "test_trace"
    # All blocks are of the same size so that OPT must achieve the lowest miss
    # ratio.
    with open(trace_file_path, "w+") as trace_file:
        access_records = ""
        for i in range(n):
            key_id = random.randint(0, nblocks)
            cf_id = random.randint(0, ncfs)
            level = random.randint(0, nlevels)
            fd = random.randint(0, nfds)
            now = i * kMicrosInSecond
            access_record = ""
            access_record += "{},".format(now)
            access_record += "{},".format(key_id)
            access_record += "{},".format(9)  # block type
            access_record += "{},".format(block_size)  # block size
            access_record += "{},".format(cf_id)
            access_record += "cf_{},".format(cf_id)
            access_record += "{},".format(level)
            access_record += "{},".format(fd)
            access_record += "{},".format(key_id % 3)  # caller
            access_record += "{},".format(0)  # no insert
            access_record += "{},".format(i)  # get_id
            access_record += "{},".format(i)  # key_id
            access_record += "{},".format(100)  # kv_size
            access_record += "{},".format(1)  # is_hit
            access_record += "{},".format(1)  # referenced_key_exist_in_block
            access_record += "{},".format(10)  # num_keys_in_block
            access_record += "{},".format(1)  # table_id
            access_record += "{},".format(0)  # seq_number
            access_record += "{},".format(10)  # block key size
            access_record += "{},".format(20)  # key size
            access_record += "{},".format(0)  # block offset
            access_record = access_record[:-1]
            access_records += access_record + "\n"
        trace_file.write(access_records)

    print("Test All caches: Start testing caches")
    cache_size = block_size * nblocks / 10
    downsample_size = 1
    cache_ms = {}
    for cache_type in [
        "ts",
        "opt",
        "lru",
        "pylru",
        "linucb",
        "gdsize",
        "pyccbt",
        "pycctbbt",
    ]:
        cache = create_cache(cache_type, cache_size, downsample_size)
        run(trace_file_path, cache_type, cache, 0, -1, "all")
        cache_ms[cache_type] = cache
        assert cache.miss_ratio_stats.num_accesses == n

    for cache_type in cache_ms:
        cache = cache_ms[cache_type]
        ms = cache.miss_ratio_stats.miss_ratio()
        assert ms <= 100.0 and ms >= 0.0
        # OPT should perform the best.
        assert cache_ms["opt"].miss_ratio_stats.miss_ratio() <= ms
        assert cache.used_size <= cache.cache_size
        all_values = cache.table.values()
        cached_size = 0
        for value in all_values:
            cached_size += value.value_size
        assert cached_size == cache.used_size, "Expected {} Actual {}".format(
            cache.used_size, cached_size
        )
        print("Test All {}: Success".format(cache.cache_name()))

    os.remove(trace_file_path)
    print("Test All: Success")
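

# Exercise hybrid row-key caching: a get request can be served from a cached
# row key without touching its blocks, and the same row key is inserted at
# most once per get request.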
def test_hybrid(cache):
    print("Test {} cache".format(cache.cache_name()))
    k = TraceRecord(
        access_time=0,
        block_id=1,
        block_type=1,
        block_size=1,
        cf_id=0,
        cf_name="",
        level=0,
        fd=0,
        caller=1,
        no_insert=0,
        get_id=1,  # the first get request.
        key_id=1,
        kv_size=0,  # no size.
        is_hit=1,
        referenced_key_exist_in_block=1,
        num_keys_in_block=0,
        table_id=0,
        seq_number=0,
        block_key_size=0,
        key_size=0,
        block_offset_in_file=0,
        next_access_seq_no=0,
    )
    cache.access(k)  # Expect a miss.
    # used size, num accesses, num misses, hash table size, blocks, get keys.
    assert_metrics(cache, [1, 1, 1, [1], []])
    k.access_time += 1
    k.kv_size = 1
    k.block_id = 2
    cache.access(k)  # k should be inserted.
    assert_metrics(cache, [3, 2, 2, [1, 2], [1]])
    k.access_time += 1
    k.block_id = 3
    cache.access(k)  # k should not be inserted again.
    assert_metrics(cache, [4, 3, 3, [1, 2, 3], [1]])
    # A second get request referencing the same key.
    k.access_time += 1
    k.get_id = 2
    k.block_id = 4
    k.kv_size = 0
    cache.access(k)  # k should observe a hit. No block access.
    assert_metrics(cache, [4, 4, 3, [1, 2, 3], [1]])
    # A third get request searches three files, three different keys.
    # And the second key observes a hit.
    k.access_time += 1
    k.kv_size = 1
    k.get_id = 3
    k.block_id = 3
    k.key_id = 2
    cache.access(k)  # k should observe a miss. block 3 observes a hit.
    assert_metrics(cache, [5, 5, 3, [1, 2, 3], [1, 2]])
    k.access_time += 1
    k.kv_size = 1
    k.get_id = 3
    k.block_id = 4
    k.kv_size = 1
    k.key_id = 1
    cache.access(k)  # k1 should observe a hit.
    assert_metrics(cache, [5, 6, 3, [1, 2, 3], [1, 2]])
    k.access_time += 1
    k.kv_size = 1
    k.get_id = 3
    k.block_id = 4
    k.kv_size = 1
    k.key_id = 3
    # k3 should observe a miss.
    # However, as the get is already complete, we should not access k3 anymore.
    cache.access(k)
    assert_metrics(cache, [5, 7, 3, [1, 2, 3], [1, 2]])
    # A fourth get request searches one file and two blocks. One row key.
    k.access_time += 1
    k.get_id = 4
    k.block_id = 5
    k.key_id = 4
    k.kv_size = 1
    cache.access(k)
    assert_metrics(cache, [7, 8, 4, [1, 2, 3, 5], [1, 2, 4]])
    # A bunch of insertions which evict cached row keys.
    for i in range(6, 100):
        k.access_time += 1
        k.get_id = 0
        k.block_id = i
        cache.access(k)

    k.get_id = 4
    k.block_id = 100  # A different block.
    k.key_id = 4  # Same row key and should not be inserted again.
    k.kv_size = 1
    cache.access(k)
    assert_metrics(
        cache, [kSampleSize, 103, 99, [i for i in range(101 - kSampleSize, 101)], []]
    )
    print("Test {} cache: Success".format(cache.cache_name()))
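

# OPT evicts the entry whose next access is furthest in the future (Belady's
# MIN); the scenario is spelled out in the seq/key/next_access comment below.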
def test_opt_cache():
    print("Test OPT cache")
    cache = OPTCache(3)
    # seq:         0,  1,  2,  3,  4,  5,  6,  7,  8
    # key:         k1, k2, k3, k4, k5, k6, k7, k1, k8
    # next_access: 7,  19, 18, M,  M,  17, 16, 25, M
    k = TraceRecord(
        access_time=0,
        block_id=1,
        block_type=1,
        block_size=1,
        cf_id=0,
        cf_name="",
        level=0,
        fd=0,
        caller=1,
        no_insert=0,
        get_id=1,  # the first get request.
        key_id=1,
        kv_size=0,  # no size.
        is_hit=1,
        referenced_key_exist_in_block=1,
        num_keys_in_block=0,
        table_id=0,
        seq_number=0,
        block_key_size=0,
        key_size=0,
        block_offset_in_file=0,
        next_access_seq_no=7,
    )
    cache.access(k)
    assert_metrics(
        cache, [1, 1, 1, [1], []], expected_value_size=1, custom_hashtable=False
    )
    k.access_time += 1
    k.block_id = 2
    k.next_access_seq_no = 19
    cache.access(k)
    assert_metrics(
        cache, [2, 2, 2, [1, 2], []], expected_value_size=1, custom_hashtable=False
    )
    k.access_time += 1
    k.block_id = 3
    k.next_access_seq_no = 18
    cache.access(k)
    assert_metrics(
        cache, [3, 3, 3, [1, 2, 3], []], expected_value_size=1, custom_hashtable=False
    )
    k.access_time += 1
    k.block_id = 4
    k.next_access_seq_no = sys.maxsize  # Never accessed again.
    cache.access(k)
    # Evict 2 since its next access 19 is the furthest in the future.
    assert_metrics(
        cache, [3, 4, 4, [1, 3, 4], []], expected_value_size=1, custom_hashtable=False
    )
    k.access_time += 1
    k.block_id = 5
    k.next_access_seq_no = sys.maxsize  # Never accessed again.
    cache.access(k)
    # Evict 4 since its next access MAXINT is the furthest in the future.
    assert_metrics(
        cache, [3, 5, 5, [1, 3, 5], []], expected_value_size=1, custom_hashtable=False
    )
    k.access_time += 1
    k.block_id = 6
    k.next_access_seq_no = 17
    cache.access(k)
    # Evict 5 since its next access MAXINT is the furthest in the future.
    assert_metrics(
        cache, [3, 6, 6, [1, 3, 6], []], expected_value_size=1, custom_hashtable=False
    )
    k.access_time += 1
    k.block_id = 7
    k.next_access_seq_no = 16
    cache.access(k)
    # Evict 3 since its next access 18 is the furthest in the future.
    assert_metrics(
        cache, [3, 7, 7, [1, 6, 7], []], expected_value_size=1, custom_hashtable=False
    )
    k.access_time += 1
    k.block_id = 1
    k.next_access_seq_no = 25
    cache.access(k)
    assert_metrics(
        cache, [3, 8, 7, [1, 6, 7], []], expected_value_size=1, custom_hashtable=False
    )
    k.access_time += 1
    k.block_id = 8
    k.next_access_seq_no = sys.maxsize
    cache.access(k)
    # Evict 1 since its next access 25 is the furthest in the future.
    assert_metrics(
        cache, [3, 9, 8, [6, 7, 8], []], expected_value_size=1, custom_hashtable=False
    )
    # Insert a large kv pair to evict all keys.
    k.access_time += 1
    k.block_id = 10
    k.block_size = 3
    k.next_access_seq_no = sys.maxsize
    cache.access(k)
    assert_metrics(
        cache, [3, 10, 9, [10], []], expected_value_size=3, custom_hashtable=False
    )
    print("Test OPT cache: Success")
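

# TraceCache simply replays the hits/misses recorded in the trace, so its miss
# counter should follow the is_hit field of each access.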
def test_trace_cache():
    print("Test trace cache")
    cache = TraceCache(0)
    k = TraceRecord(
        access_time=0,
        block_id=1,
        block_type=1,
        block_size=1,
        cf_id=0,
        cf_name="",
        level=0,
        fd=0,
        caller=1,
        no_insert=0,
        get_id=1,
        key_id=1,
        kv_size=0,
        is_hit=1,
        referenced_key_exist_in_block=1,
        num_keys_in_block=0,
        table_id=0,
        seq_number=0,
        block_key_size=0,
        key_size=0,
        block_offset_in_file=0,
        next_access_seq_no=7,
    )
    cache.access(k)
    assert cache.miss_ratio_stats.num_accesses == 1
    assert cache.miss_ratio_stats.num_misses == 0
    k.is_hit = 0
    cache.access(k)
    assert cache.miss_ratio_stats.num_accesses == 2
    assert cache.miss_ratio_stats.num_misses == 1
    print("Test trace cache: Success")


if __name__ == "__main__":
    test_hash_table()
    test_trace_cache()
    test_opt_cache()
    test_lru_cache(
        ThompsonSamplingCache(
            3, enable_cache_row_key=0, policies=[LRUPolicy()], cost_class_label=None
        ),
        custom_hashtable=True,
    )
    test_lru_cache(LRUCache(3, enable_cache_row_key=0), custom_hashtable=False)
    test_mru_cache()
    test_lfu_cache()
    test_hybrid(
        ThompsonSamplingCache(
            kSampleSize,
            enable_cache_row_key=1,
            policies=[LRUPolicy()],
            cost_class_label=None,
        )
    )
    test_hybrid(
        LinUCBCache(
            kSampleSize,
            enable_cache_row_key=1,
            policies=[LRUPolicy()],
            cost_class_label=None,
        )
    )
    for cache_type in [
        "ts",
        "opt",
        "arc",
        "pylfu",
        "pymru",
        "trace",
        "pyhb",
        "lru",
        "pylru",
        "linucb",
        "gdsize",
        "pycctbbt",
        "pycctb",
        "pyccbt",
    ]:
        for enable_row_cache in [0, 1, 2]:
            cache_type_str = cache_type
            if cache_type != "opt" and cache_type != "trace":
                if enable_row_cache == 1:
                    cache_type_str += "_hybrid"
                elif enable_row_cache == 2:
                    cache_type_str += "_hybridn"
            test_mix(create_cache(cache_type_str, cache_size=100, downsample_size=1))
    test_end_to_end()