builder.cc

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/builder.h"

#include <algorithm>
#include <deque>
#include <vector>

#include "db/blob/blob_file_builder.h"
#include "db/compaction/compaction_iterator.h"
#include "db/dbformat.h"
#include "db/event_helpers.h"
#include "db/internal_stats.h"
#include "db/merge_helper.h"
#include "db/output_validator.h"
#include "db/range_del_aggregator.h"
#include "db/table_cache.h"
#include "db/version_edit.h"
#include "file/file_util.h"
#include "file/filename.h"
#include "file/read_write_util.h"
#include "file/writable_file_writer.h"
#include "monitoring/iostats_context_imp.h"
#include "monitoring/thread_status_util.h"
#include "options/options_helper.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/file_system.h"
#include "rocksdb/iterator.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"
#include "seqno_to_time_mapping.h"
#include "table/block_based/block_based_table_builder.h"
#include "table/format.h"
#include "table/internal_iterator.h"
#include "table/unique_id_impl.h"
#include "test_util/sync_point.h"
#include "util/stop_watch.h"
namespace ROCKSDB_NAMESPACE {

class TableFactory;

TableBuilder* NewTableBuilder(const TableBuilderOptions& tboptions,
                              WritableFileWriter* file) {
  assert((tboptions.column_family_id ==
          TablePropertiesCollectorFactory::Context::kUnknownColumnFamily) ==
         tboptions.column_family_name.empty());
  return tboptions.moptions.table_factory->NewTableBuilder(tboptions, file);
}
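
// BuildTable writes the key/value pairs produced by `iter`, plus any
// fragmented range tombstones in `range_del_iters`, into a new SST file
// described by `meta` (used for flush and related table-file creation paths,
// see `tboptions.reason`). On success it fills in the file metadata (size,
// boundaries, checksum, unique id). On failure, or when the output would be
// empty, the partially written table and blob files are deleted.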
Status BuildTable(
    const std::string& dbname, VersionSet* versions,
    const ImmutableDBOptions& db_options, const TableBuilderOptions& tboptions,
    const FileOptions& file_options, TableCache* table_cache,
    InternalIterator* iter,
    std::vector<std::unique_ptr<FragmentedRangeTombstoneIterator>>
        range_del_iters,
    FileMetaData* meta, std::vector<BlobFileAddition>* blob_file_additions,
    std::vector<SequenceNumber> snapshots, SequenceNumber earliest_snapshot,
    SequenceNumber earliest_write_conflict_snapshot,
    SequenceNumber job_snapshot, SnapshotChecker* snapshot_checker,
    bool paranoid_file_checks, InternalStats* internal_stats,
    IOStatus* io_status, const std::shared_ptr<IOTracer>& io_tracer,
    BlobFileCreationReason blob_creation_reason,
    UnownedPtr<const SeqnoToTimeMapping> seqno_to_time_mapping,
    EventLogger* event_logger, int job_id, TableProperties* table_properties,
    Env::WriteLifeTimeHint write_hint, const std::string* full_history_ts_low,
    BlobFileCompletionCallback* blob_callback, Version* version,
    uint64_t* memtable_payload_bytes, uint64_t* memtable_garbage_bytes,
    InternalStats::CompactionStats* flush_stats) {
  assert((tboptions.column_family_id ==
          TablePropertiesCollectorFactory::Context::kUnknownColumnFamily) ==
         tboptions.column_family_name.empty());
  auto& mutable_cf_options = tboptions.moptions;
  auto& ioptions = tboptions.ioptions;
  // Report the IOStats for flush once this many bytes have been written.
  const size_t kReportFlushIOStatsEvery = 1048576;
  OutputValidator output_validator(tboptions.internal_comparator,
                                   /*enable_hash=*/paranoid_file_checks);
  Status s;
  meta->fd.file_size = 0;
  iter->SeekToFirst();
  std::unique_ptr<CompactionRangeDelAggregator> range_del_agg(
      new CompactionRangeDelAggregator(&tboptions.internal_comparator,
                                       snapshots, full_history_ts_low));
  uint64_t num_unfragmented_tombstones = 0;
  uint64_t total_tombstone_payload_bytes = 0;
  for (auto& range_del_iter : range_del_iters) {
    num_unfragmented_tombstones +=
        range_del_iter->num_unfragmented_tombstones();
    total_tombstone_payload_bytes +=
        range_del_iter->total_tombstone_payload_bytes();
    range_del_agg->AddTombstones(std::move(range_del_iter));
  }

  std::string fname = TableFileName(ioptions.cf_paths, meta->fd.GetNumber(),
                                    meta->fd.GetPathId());
  std::vector<std::string> blob_file_paths;
  std::string file_checksum = kUnknownFileChecksum;
  std::string file_checksum_func_name = kUnknownFileChecksumFuncName;
  EventHelpers::NotifyTableFileCreationStarted(ioptions.listeners, dbname,
                                               tboptions.column_family_name,
                                               fname, job_id, tboptions.reason);
  Env* env = db_options.env;
  assert(env);
  FileSystem* fs = db_options.fs.get();
  assert(fs);

  TableProperties tp;
  bool table_file_created = false;
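  // Only build a table if there is at least one entry from the input iterator
  // or at least one range tombstone to write.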
  if (iter->Valid() || !range_del_agg->IsEmpty()) {
    std::unique_ptr<CompactionFilter> compaction_filter;
    if (ioptions.compaction_filter_factory != nullptr &&
        ioptions.compaction_filter_factory->ShouldFilterTableFileCreation(
            tboptions.reason)) {
      CompactionFilter::Context context;
      context.is_full_compaction = false;
      context.is_manual_compaction = false;
      context.column_family_id = tboptions.column_family_id;
      context.reason = tboptions.reason;
      compaction_filter =
          ioptions.compaction_filter_factory->CreateCompactionFilter(context);
      if (compaction_filter != nullptr &&
          !compaction_filter->IgnoreSnapshots()) {
        s.PermitUncheckedError();
        return Status::NotSupported(
            "CompactionFilter::IgnoreSnapshots() = false is not supported "
            "anymore.");
      }
    }
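
    // Create the output file, wrap it in a WritableFileWriter, and hand the
    // writer to a TableBuilder obtained from the configured table factory.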
    TableBuilder* builder;
    std::unique_ptr<WritableFileWriter> file_writer;
    {
      std::unique_ptr<FSWritableFile> file;
#ifndef NDEBUG
      bool use_direct_writes = file_options.use_direct_writes;
      TEST_SYNC_POINT_CALLBACK("BuildTable:create_file", &use_direct_writes);
#endif  // !NDEBUG
      FileOptions fo_copy = file_options;
      fo_copy.write_hint = write_hint;
      IOStatus io_s = NewWritableFile(fs, fname, &file, fo_copy);
      assert(s.ok());
      s = io_s;
      if (io_status->ok()) {
        *io_status = io_s;
      }
      if (!s.ok()) {
        EventHelpers::LogAndNotifyTableFileCreationFinished(
            event_logger, ioptions.listeners, dbname,
            tboptions.column_family_name, fname, job_id, meta->fd,
            kInvalidBlobFileNumber, tp, tboptions.reason, s, file_checksum,
            file_checksum_func_name);
        return s;
      }

      table_file_created = true;
      FileTypeSet tmp_set = ioptions.checksum_handoff_file_types;
      file->SetIOPriority(tboptions.write_options.rate_limiter_priority);
      // Subsequent attempts to override the hint via SetWriteLifeTimeHint
      // with the very same value will be ignored by the fs.
      file->SetWriteLifeTimeHint(fo_copy.write_hint);
      file_writer.reset(new WritableFileWriter(
          std::move(file), fname, file_options, ioptions.clock, io_tracer,
          ioptions.stats, Histograms::SST_WRITE_MICROS, ioptions.listeners,
          ioptions.file_checksum_gen_factory.get(),
          tmp_set.Contains(FileType::kTableFile), false));

      builder = NewTableBuilder(tboptions, file_writer.get());
    }
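
    // Set up merge handling, optional blob file output (when blob files are
    // enabled at this level), and the CompactionIterator that drives the
    // key-by-key output loop below.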
    auto ucmp = tboptions.internal_comparator.user_comparator();
    MergeHelper merge(
        env, ucmp, ioptions.merge_operator.get(), compaction_filter.get(),
        ioptions.logger, true /* internal key corruption is not ok */,
        snapshots.empty() ? 0 : snapshots.back(), snapshot_checker);

    std::unique_ptr<BlobFileBuilder> blob_file_builder(
        (mutable_cf_options.enable_blob_files &&
         tboptions.level_at_creation >=
             mutable_cf_options.blob_file_starting_level &&
         blob_file_additions)
            ? new BlobFileBuilder(
                  versions, fs, &ioptions, &mutable_cf_options, &file_options,
                  &(tboptions.write_options), tboptions.db_id,
                  tboptions.db_session_id, job_id, tboptions.column_family_id,
                  tboptions.column_family_name, write_hint, io_tracer,
                  blob_callback, blob_creation_reason, &blob_file_paths,
                  blob_file_additions)
            : nullptr);

    const std::atomic<bool> kManualCompactionCanceledFalse{false};
    CompactionIterator c_iter(
        iter, ucmp, &merge, kMaxSequenceNumber, &snapshots, earliest_snapshot,
        earliest_write_conflict_snapshot, job_snapshot, snapshot_checker, env,
        ShouldReportDetailedTime(env, ioptions.stats), range_del_agg.get(),
        blob_file_builder.get(), ioptions.allow_data_in_errors,
        ioptions.enforce_single_del_contracts,
        /*manual_compaction_canceled=*/kManualCompactionCanceledFalse,
        true /* must_count_input_entries */,
        /*compaction=*/nullptr, compaction_filter.get(),
        /*shutting_down=*/nullptr, db_options.info_log, full_history_ts_low);
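
    // Main output loop: add each key/value produced by the CompactionIterator
    // to the table builder, converting kTypeValuePreferredSeqno entries based
    // on the seqno-to-time mapping and updating the file boundaries as we go.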
    SequenceNumber smallest_preferred_seqno = kMaxSequenceNumber;
    std::string key_after_flush_buf;
    std::string value_buf;
    c_iter.SeekToFirst();
    for (; c_iter.Valid(); c_iter.Next()) {
      const Slice& key = c_iter.key();
      const Slice& value = c_iter.value();
      ParsedInternalKey ikey = c_iter.ikey();
      Slice key_after_flush = key;
      Slice value_after_flush = value;
      if (ikey.type == kTypeValuePreferredSeqno) {
        auto [unpacked_value, unix_write_time] =
            ParsePackedValueWithWriteTime(value);
        SequenceNumber preferred_seqno =
            seqno_to_time_mapping
                ? seqno_to_time_mapping->GetProximalSeqnoBeforeTime(
                      unix_write_time)
                : kMaxSequenceNumber;
        if (preferred_seqno < ikey.sequence) {
          value_after_flush =
              PackValueAndSeqno(unpacked_value, preferred_seqno, &value_buf);
          smallest_preferred_seqno =
              std::min(smallest_preferred_seqno, preferred_seqno);
        } else {
          // Cannot get a useful preferred seqno, convert it to a kTypeValue.
          key_after_flush_buf.assign(key.data(), key.size());
          UpdateInternalKey(&key_after_flush_buf, ikey.sequence, kTypeValue);
          ikey = ParsedInternalKey(ikey.user_key, ikey.sequence, kTypeValue);
          key_after_flush = key_after_flush_buf;
          value_after_flush = ParsePackedValueForValue(value);
        }
      }

      // Generate a rolling 64-bit hash of the key and values
      // Note:
      // Here "key" integrates 'sequence_number'+'kType'+'user key'.
      s = output_validator.Add(key_after_flush, value_after_flush);
      if (!s.ok()) {
        break;
      }
      builder->Add(key_after_flush, value_after_flush);
      if (flush_stats) {
        flush_stats->num_output_records++;
      }

      s = meta->UpdateBoundaries(key_after_flush, value_after_flush,
                                 ikey.sequence, ikey.type);
      if (!s.ok()) {
        break;
      }

      // TODO(noetzli): Update stats after flush, too.
      // TODO(hx235): Replace `rate_limiter_priority` with `io_activity` for
      // flush IO in repair when we have an `Env::IOActivity` enum for it
      if ((tboptions.write_options.io_activity == Env::IOActivity::kFlush ||
           tboptions.write_options.io_activity == Env::IOActivity::kDBOpen ||
           tboptions.write_options.rate_limiter_priority == Env::IO_HIGH) &&
          IOSTATS(bytes_written) >= kReportFlushIOStatsEvery) {
        ThreadStatusUtil::SetThreadOperationProperty(
            ThreadStatus::FLUSH_BYTES_WRITTEN, IOSTATS(bytes_written));
      }
    }

    if (!s.ok()) {
      c_iter.status().PermitUncheckedError();
    } else if (!c_iter.status().ok()) {
      s = c_iter.status();
    }
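
    // Serialize the fragmented range tombstones into the table and extend the
    // file boundaries to cover them. When a Version is available, also
    // accumulate an approximate compensated size for the data each tombstone
    // covers.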
    if (s.ok()) {
      auto range_del_it = range_del_agg->NewIterator();
      Slice last_tombstone_start_user_key{};
      for (range_del_it->SeekToFirst(); range_del_it->Valid();
           range_del_it->Next()) {
        auto tombstone = range_del_it->Tombstone();
        std::pair<InternalKey, Slice> kv = tombstone.Serialize();
        builder->Add(kv.first.Encode(), kv.second);
        if (flush_stats) {
          flush_stats->num_output_records++;
        }
        InternalKey tombstone_end = tombstone.SerializeEndKey();
        meta->UpdateBoundariesForRange(kv.first, tombstone_end, tombstone.seq_,
                                       tboptions.internal_comparator);
        if (version) {
          if (last_tombstone_start_user_key.empty() ||
              ucmp->CompareWithoutTimestamp(last_tombstone_start_user_key,
                                            range_del_it->start_key()) < 0) {
            SizeApproximationOptions approx_opts;
            approx_opts.files_size_error_margin = 0.1;
            meta->compensated_range_deletion_size += versions->ApproximateSize(
                approx_opts, tboptions.read_options, version, kv.first.Encode(),
                tombstone_end.Encode(), 0 /* start_level */, -1 /* end_level */,
                TableReaderCaller::kFlush);
          }
          last_tombstone_start_user_key = range_del_it->start_key();
        }
      }
    }
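
    // Finish or abandon the table: abandon when an error occurred or nothing
    // was written; otherwise attach the relevant part of the seqno-to-time
    // mapping as table properties and finalize the file.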
    TEST_SYNC_POINT("BuildTable:BeforeFinishBuildTable");
    const bool empty = builder->IsEmpty();
    if (flush_stats) {
      assert(c_iter.HasNumInputEntryScanned());
      flush_stats->num_input_records =
          c_iter.NumInputEntryScanned() + num_unfragmented_tombstones;
    }
    if (!s.ok() || empty) {
      builder->Abandon();
    } else {
      SeqnoToTimeMapping relevant_mapping;
      if (seqno_to_time_mapping) {
        relevant_mapping.CopyFromSeqnoRange(
            *seqno_to_time_mapping,
            std::min(meta->fd.smallest_seqno, smallest_preferred_seqno),
            meta->fd.largest_seqno);
        relevant_mapping.SetCapacity(kMaxSeqnoTimePairsPerSST);
        relevant_mapping.Enforce(tboptions.file_creation_time);
      }
      builder->SetSeqnoTimeTableProperties(
          relevant_mapping,
          ioptions.compaction_style == CompactionStyle::kCompactionStyleFIFO
              ? meta->file_creation_time
              : meta->oldest_ancester_time);
      s = builder->Finish();
    }
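
    // On success, propagate the builder's I/O status and record the output's
    // size, tail size, compaction flag, and table properties in the file
    // metadata, along with memtable payload/garbage statistics if requested.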
    if (io_status->ok()) {
      *io_status = builder->io_status();
    }
    if (s.ok() && !empty) {
      if (flush_stats) {
        flush_stats->bytes_written_pre_comp = builder->PreCompressionSize();
        // Add worker CPU micros here. Caller needs to add CPU micros from
        // calling thread.
        flush_stats->cpu_micros += builder->GetWorkerCPUMicros();
      }
      uint64_t file_size = builder->FileSize();
      meta->fd.file_size = file_size;
      meta->tail_size = builder->GetTailSize();
      meta->marked_for_compaction = builder->NeedCompact();
      meta->user_defined_timestamps_persisted =
          ioptions.persist_user_defined_timestamps;
      assert(meta->fd.GetFileSize() > 0);
      tp = builder
               ->GetTableProperties();  // refresh now that builder is finished
      if (memtable_payload_bytes != nullptr &&
          memtable_garbage_bytes != nullptr) {
        const CompactionIterationStats& ci_stats = c_iter.iter_stats();
        uint64_t total_payload_bytes = ci_stats.total_input_raw_key_bytes +
                                       ci_stats.total_input_raw_value_bytes +
                                       total_tombstone_payload_bytes;
        uint64_t total_payload_bytes_written =
            (tp.raw_key_size + tp.raw_value_size);
        // Prevent underflow, which may still happen at this point
        // since we only support inserts, deletes, and deleteRanges.
        if (total_payload_bytes_written <= total_payload_bytes) {
          *memtable_payload_bytes = total_payload_bytes;
          *memtable_garbage_bytes =
              total_payload_bytes - total_payload_bytes_written;
        } else {
          *memtable_payload_bytes = 0;
          *memtable_garbage_bytes = 0;
        }
      }
      if (table_properties) {
        *table_properties = tp;
      }
    }
    delete builder;

    // Finish and check for file errors
    TEST_SYNC_POINT("BuildTable:BeforeSyncTable");
    IOOptions opts;
    *io_status =
        WritableFileWriter::PrepareIOOptions(tboptions.write_options, opts);
    if (s.ok() && io_status->ok() && !empty) {
      StopWatch sw(ioptions.clock, ioptions.stats, TABLE_SYNC_MICROS);
      *io_status = file_writer->Sync(opts, ioptions.use_fsync);
    }
    TEST_SYNC_POINT("BuildTable:BeforeCloseTableFile");
    if (s.ok() && io_status->ok() && !empty) {
      *io_status = file_writer->Close(opts);
    }
    if (s.ok() && io_status->ok() && !empty) {
      // Add the checksum information to file metadata.
      meta->file_checksum = file_writer->GetFileChecksum();
      meta->file_checksum_func_name = file_writer->GetFileChecksumFuncName();
      file_checksum = meta->file_checksum;
      file_checksum_func_name = meta->file_checksum_func_name;

      // Set unique_id only if db_id and db_session_id exist
      if (!tboptions.db_id.empty() && !tboptions.db_session_id.empty()) {
        if (!GetSstInternalUniqueId(tboptions.db_id, tboptions.db_session_id,
                                    meta->fd.GetNumber(), &(meta->unique_id))
                 .ok()) {
          // if failed to get unique id, just set it Null
          meta->unique_id = kNullUniqueId64x2;
        }
      }
    }

    if (s.ok()) {
      s = *io_status;
    }

    // TODO(yuzhangyu): handle the key copy in the blob when ts should be
    // stripped.
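    // Finish the blob file builder on success; otherwise abandon it so no
    // partially written blob output is registered.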
    if (blob_file_builder) {
      if (s.ok()) {
        s = blob_file_builder->Finish();
      } else {
        blob_file_builder->Abandon(s);
      }
      blob_file_builder.reset();
    }

    // TODO: Also check the IO status when creating the Iterator.
    TEST_SYNC_POINT("BuildTable:BeforeOutputValidation");
    if (s.ok() && !empty) {
      // Verify that the table is usable
      // We set for_compaction to false and don't OptimizeForCompactionTableRead
      // here because this is a special case after we finish the table building.
      // No matter whether use_direct_io_for_flush_and_compaction is true,
      // the goal is to cache it here for further user reads.
      std::unique_ptr<InternalIterator> it(table_cache->NewIterator(
          tboptions.read_options, file_options, tboptions.internal_comparator,
          *meta, nullptr /* range_del_agg */, mutable_cf_options, nullptr,
          (internal_stats == nullptr) ? nullptr
                                      : internal_stats->GetFileReadHist(0),
          TableReaderCaller::kFlush, /*arena=*/nullptr,
          /*skip_filter=*/false, tboptions.level_at_creation,
          MaxFileSizeForL0MetaPin(mutable_cf_options),
          /*smallest_compaction_key=*/nullptr,
          /*largest_compaction_key*/ nullptr,
          /*allow_unprepared_value*/ false));
      s = it->status();
      if (s.ok() && paranoid_file_checks) {
        OutputValidator file_validator(tboptions.internal_comparator,
                                       /*enable_hash=*/true);
        for (it->SeekToFirst(); it->Valid(); it->Next()) {
          // Generate a rolling 64-bit hash of the key and values
          file_validator.Add(it->key(), it->value()).PermitUncheckedError();
        }
        s = it->status();
        if (s.ok() && !output_validator.CompareValidator(file_validator)) {
          s = Status::Corruption("Paranoid checksums do not match");
        }
      }
    }
  }

  // Check for input iterator errors
  if (!iter->status().ok()) {
    s = iter->status();
  }
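
  // On failure, or when the resulting file is empty and therefore not kept,
  // delete the partially written table and blob files and release any table
  // cache entries created for the output.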
  if (!s.ok() || meta->fd.GetFileSize() == 0) {
    TEST_SYNC_POINT("BuildTable:BeforeDeleteFile");

    constexpr IODebugContext* dbg = nullptr;

    if (table_file_created) {
      IOOptions opts;
      Status prepare =
          WritableFileWriter::PrepareIOOptions(tboptions.write_options, opts);
      if (prepare.ok()) {
        // FIXME: track file for "slow" deletion, e.g. into the
        // VersionSet::obsolete_files_ pipeline
        Status ignored = fs->DeleteFile(fname, opts, dbg);
        ignored.PermitUncheckedError();
      }
      // Ensure we don't leak table cache entries when throwing away output
      // files. (The usual logic in PurgeObsoleteFiles is not applicable because
      // this function deletes the obsolete file itself, while they should
      // probably go into the VersionSet::obsolete_files_ pipeline.)
      TableCache::ReleaseObsolete(table_cache->get_cache().get(),
                                  meta->fd.GetNumber(), nullptr /*handle*/,
                                  mutable_cf_options.uncache_aggressiveness);
    }

    assert(blob_file_additions || blob_file_paths.empty());

    if (blob_file_additions) {
      for (const std::string& blob_file_path : blob_file_paths) {
        Status ignored = DeleteDBFile(&db_options, blob_file_path, dbname,
                                      /*force_bg=*/false, /*force_fg=*/false);
        ignored.PermitUncheckedError();
        TEST_SYNC_POINT("BuildTable::AfterDeleteFile");
      }
    }
  }
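
  // An empty output file is reported to listeners as Aborted, with "(nil)" in
  // place of the file name, since the file is not kept.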
  Status status_for_listener = s;
  if (meta->fd.GetFileSize() == 0) {
    fname = "(nil)";
    if (s.ok()) {
      status_for_listener = Status::Aborted("Empty SST file not kept");
    }
  }
  // Output to event logger and fire events.
  EventHelpers::LogAndNotifyTableFileCreationFinished(
      event_logger, ioptions.listeners, dbname, tboptions.column_family_name,
      fname, job_id, meta->fd, meta->oldest_blob_file_number, tp,
      tboptions.reason, status_for_listener, file_checksum,
      file_checksum_func_name);
  return s;
}

}  // namespace ROCKSDB_NAMESPACE