YugabyteDB (2.13.0.0-b42, bfc6a6643e7399ac8a0e81d06a3ee6d6571b33ab)

Coverage Report

Created: 2022-03-09 17:30

/Users/deen/code/yugabyte-db/src/yb/rocksdb/db/deletefile_test.cc
Line
Count
Source
1
//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
2
//  This source code is licensed under the BSD-style license found in the
3
//  LICENSE file in the root directory of this source tree. An additional grant
4
//  of patent rights can be found in the PATENTS file in the same directory.
5
//
6
// The following only applies to changes made to this file as part of YugaByte development.
7
//
8
// Portions Copyright (c) YugaByte, Inc.
9
//
10
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
11
// in compliance with the License.  You may obtain a copy of the License at
12
//
13
// http://www.apache.org/licenses/LICENSE-2.0
14
//
15
// Unless required by applicable law or agreed to in writing, software distributed under the License
16
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
17
// or implied.  See the License for the specific language governing permissions and limitations
18
// under the License.
19
//
20
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
21
// Use of this source code is governed by a BSD-style license that can be
22
// found in the LICENSE file. See the AUTHORS file for names of contributors.
23
24
#ifndef ROCKSDB_LITE
25
26
#include <stdlib.h>
27
28
#include <vector>
29
#include <map>
30
#include <string>
31
32
#include <boost/function.hpp>
33
34
#include "yb/rocksdb/db.h"
35
#include "yb/rocksdb/db/db_impl.h"
36
#include "yb/rocksdb/db/db_test_util.h"
37
#include "yb/rocksdb/db/filename.h"
38
#include "yb/rocksdb/db/version_set.h"
39
#include "yb/rocksdb/db/write_batch_internal.h"
40
#include "yb/rocksdb/util/file_util.h"
41
#include "yb/rocksdb/util/testharness.h"
42
#include "yb/rocksdb/util/testutil.h"
43
#include "yb/rocksdb/env.h"
44
#include "yb/rocksdb/transaction_log.h"
45
46
#include "yb/util/status_log.h"
47
#include "yb/util/string_util.h"
48
#include "yb/util/test_macros.h"
49
50
using namespace std::chrono_literals;
51
52
namespace rocksdb {
53
54
YB_STRONGLY_TYPED_BOOL(StopOnMaxFilesDeleted);
55
56
class DeleteFileTest : public RocksDBTest {
57
 public:
58
  std::string dbname_;
59
  Options options_;
60
  std::unique_ptr<DB> db_;
61
  Env* env_;
62
  int numlevels_;
63
64
7
  DeleteFileTest() {
65
7
    db_.reset();
66
7
    env_ = Env::Default();
67
7
    options_.delete_obsolete_files_period_micros = 0;  // always do full purge
68
7
    options_.enable_thread_tracking = true;
69
7
    options_.write_buffer_size = 1024*1024*1000;
70
7
    options_.target_file_size_base = 1024*1024*1000;
71
7
    options_.max_bytes_for_level_base = 1024*1024*1000;
72
7
    options_.WAL_ttl_seconds = 300; // Used to test log files
73
7
    options_.WAL_size_limit_MB = 1024; // Used to test log files
74
7
    dbname_ = test::TmpDir() + "/deletefile_test";
75
7
    options_.wal_dir = dbname_ + "/wal_files";
76
77
    // clean up all the files that might have been there before
78
7
    CHECK_OK(DeleteRecursively(env_, dbname_));
79
7
    CHECK_OK(DestroyDB(dbname_, options_));
80
7
    numlevels_ = 7;
81
7
    EXPECT_OK(ReopenDB(true));
82
7
  }
83
84
14
  Status ReopenDB(bool create) {
85
14
    db_.reset();
86
14
    if (create) {
87
14
      RETURN_NOT_OK(DestroyDB(dbname_, options_));
88
14
    }
89
14
    options_.create_if_missing = create;
90
14
    DB* db;
91
14
    auto status = DB::Open(options_, dbname_, &db);
92
14
    db_.reset(db);
93
14
    return status;
94
14
  }
95
96
11
  void CloseDB() {
97
11
    db_.reset();
98
11
  }
99
100
25
  void AddKeys(int numkeys, int startkey = 0) {
101
25
    WriteOptions options;
102
25
    options.sync = false;
103
25
    ReadOptions roptions;
104
520k
    for (int i = startkey; i < (numkeys + startkey) ; i++) {
105
520k
      std::string temp = ToString(i);
106
520k
      Slice key(temp);
107
520k
      Slice value(temp);
108
520k
      ASSERT_OK(db_->Put(options, key, value));
109
520k
    }
110
25
  }
111
112
  int numKeysInLevels(
113
    const std::vector<LiveFileMetaData> &metadata,
114
0
    std::vector<int> *keysperlevel = nullptr) {
115
0
116
0
    if (keysperlevel != nullptr) {
117
0
      keysperlevel->resize(numlevels_);
118
0
    }
119
0
120
0
    int numKeys = 0;
121
0
    for (size_t i = 0; i < metadata.size(); i++) {
122
0
      int startkey = atoi(metadata[i].smallest.key.c_str());
123
0
      int endkey = atoi(metadata[i].largest.key.c_str());
124
0
      int numkeysinfile = (endkey - startkey + 1);
125
0
      numKeys += numkeysinfile;
126
0
      if (keysperlevel != nullptr) {
127
0
        (*keysperlevel)[static_cast<int>(metadata[i].level)] += numkeysinfile;
128
0
      }
129
0
      fprintf(stderr, "level %d name %s smallest %s largest %s\n",
130
0
              metadata[i].level, metadata[i].name.c_str(),
131
0
              metadata[i].smallest.key.c_str(),
132
0
              metadata[i].largest.key.c_str());
133
0
    }
134
0
    return numKeys;
135
0
  }
136
137
4
  void CreateTwoLevels() {
138
4
    AddKeys(50000, 10000);
139
4
    DBImpl* dbi = dbfull();
140
4
    ASSERT_OK(dbi->TEST_FlushMemTable());
141
4
    ASSERT_OK(dbi->TEST_WaitForFlushMemTable());
142
12
    for (int i = 0; i < 2; ++i) {
143
8
      ASSERT_OK(dbi->TEST_CompactRange(i, nullptr, nullptr));
144
8
    }
145
146
4
    AddKeys(50000, 10000);
147
4
    ASSERT_OK(dbi->TEST_FlushMemTable());
148
4
    ASSERT_OK(dbi->TEST_WaitForFlushMemTable());
149
4
    ASSERT_OK(dbi->TEST_CompactRange(0, nullptr, nullptr));
150
4
  }
151
152
  void CheckFileTypeCounts(const std::string& dir,
153
                           int required_log,
154
                           int required_sst,
155
5
                           int required_manifest) {
156
5
    std::vector<std::string> filenames;
157
5
    ASSERT_OK(env_->GetChildren(dir, &filenames));
158
159
5
    int log_cnt = 0, sst_cnt = 0, manifest_cnt = 0;
160
54
    for (auto file : filenames) {
161
54
      uint64_t number;
162
54
      FileType type;
163
54
      if (ParseFileName(file, &number, &type)) {
164
35
        log_cnt += (type == kLogFile);
165
35
        sst_cnt += (type == kTableFile);
166
35
        manifest_cnt += (type == kDescriptorFile);
167
35
      }
168
54
    }
169
5
    ASSERT_EQ(required_log, log_cnt);
170
5
    ASSERT_EQ(required_sst, sst_cnt);
171
5
    ASSERT_EQ(required_manifest, manifest_cnt);
172
5
  }
173
174
35
  DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_.get()); }
175
176
15
  CHECKED_STATUS FlushSync() {
177
15
    return dbfull()->TEST_FlushMemTable(/* wait = */ true);
178
15
  }
179
180
  Result<std::vector<LiveFileMetaData>> AddFiles(int num_sst_files, int num_key_per_sst);
181
182
  size_t TryDeleteFiles(
183
      const std::vector<LiveFileMetaData>& files, size_t max_files_to_delete,
184
      StopOnMaxFilesDeleted stop_on_max_files_deleted,
185
      boost::function<bool()> stop_condition);
186
};
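The fixture above leans on a few option choices that the later tests depend on: delete_obsolete_files_period_micros = 0 makes every purge a full purge, and a nonzero WAL_ttl_seconds keeps flushed WAL files in the archive instead of removing them. A minimal standalone sketch of an equivalent setup; the helper name and path handling are illustrative assumptions, not part of this test file:

    #include <memory>
    #include <string>

    #include "yb/rocksdb/db.h"

    // Hypothetical helper, for illustration only: open a DB with the same
    // file-retention behaviour the fixture configures.
    rocksdb::Status OpenLikeDeleteFileTest(const std::string& path,
                                           std::unique_ptr<rocksdb::DB>* out) {
      rocksdb::Options options;
      options.create_if_missing = true;
      options.delete_obsolete_files_period_micros = 0;  // always do a full purge
      options.WAL_ttl_seconds = 300;    // keep flushed WAL files in the archive
      options.WAL_size_limit_MB = 1024;
      rocksdb::DB* db = nullptr;
      rocksdb::Status s = rocksdb::DB::Open(options, path, &db);
      if (s.ok()) {
        out->reset(db);
      }
      return s;
    }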
187
188
1
TEST_F(DeleteFileTest, AddKeysAndQueryLevels) {
189
1
  CreateTwoLevels();
190
1
  std::vector<LiveFileMetaData> metadata;
191
1
  db_->GetLiveFilesMetaData(&metadata);
192
193
1
  std::string level1file = "";
194
1
  int level1keycount = 0;
195
1
  std::string level2file = "";
196
1
  int level2keycount = 0;
197
1
  int level1index = 0;
198
1
  int level2index = 1;
199
200
1
  ASSERT_EQ((int)metadata.size(), 2);
201
1
  if (metadata[0].level == 2) {
202
0
    level1index = 1;
203
0
    level2index = 0;
204
0
  }
205
206
1
  level1file = metadata[level1index].name;
207
1
  int startkey = atoi(metadata[level1index].smallest.key.c_str());
208
1
  int endkey = atoi(metadata[level1index].largest.key.c_str());
209
1
  level1keycount = (endkey - startkey + 1);
210
1
  level2file = metadata[level2index].name;
211
1
  startkey = atoi(metadata[level2index].smallest.key.c_str());
212
1
  endkey = atoi(metadata[level2index].largest.key.c_str());
213
1
  level2keycount = (endkey - startkey + 1);
214
215
  // Controlled setup. Levels 1 and 2 should each hold 50K keys.
216
  // This is a little fragile as it depends on the current
217
  // compaction heuristics.
218
1
  ASSERT_EQ(level1keycount, 50000);
219
1
  ASSERT_EQ(level2keycount, 50000);
220
221
1
  Status status = db_->DeleteFile("0.sst");
222
1
  ASSERT_TRUE(status.IsInvalidArgument());
223
224
  // intermediate level files cannot be deleted.
225
1
  status = db_->DeleteFile(level1file);
226
1
  ASSERT_TRUE(status.IsInvalidArgument());
227
228
  // Lowest level file deletion should succeed.
229
1
  ASSERT_OK(db_->DeleteFile(level2file));
230
231
1
  CloseDB();
232
1
}
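As the assertions above show, DeleteFile() rejects arbitrary names and files that are not at the bottommost level of their key range. A hedged sketch of picking a candidate from GetLiveFilesMetaData() by taking the deepest-level entry; this is a rough heuristic, and the helper name is made up for illustration:

    #include <string>
    #include <vector>

    #include "yb/rocksdb/db.h"

    // Illustrative only: return the name of the live SST at the deepest level,
    // the kind of file DeleteFile() is most likely to accept.
    std::string PickDeletableSst(rocksdb::DB* db) {
      std::vector<rocksdb::LiveFileMetaData> metadata;
      db->GetLiveFilesMetaData(&metadata);
      std::string candidate;
      int deepest_level = -1;
      for (const auto& file : metadata) {
        const int level = static_cast<int>(file.level);
        if (level > deepest_level) {
          deepest_level = level;
          candidate = file.name;
        }
      }
      return candidate;  // empty if there are no live SST files
    }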
233
234
1
TEST_F(DeleteFileTest, PurgeObsoleteFilesTest) {
235
1
  CreateTwoLevels();
236
  // there should be only one (empty) log file because CreateTwoLevels()
237
  // flushes the memtables to disk
238
1
  CheckFileTypeCounts(options_.wal_dir, 1, 0, 0);
239
  // 2 ssts, 1 manifest
240
1
  CheckFileTypeCounts(dbname_, 0, 2, 1);
241
1
  std::string first("0"), last("999999");
242
1
  CompactRangeOptions compact_options;
243
1
  compact_options.change_level = true;
244
1
  compact_options.target_level = 2;
245
1
  Slice first_slice(first), last_slice(last);
246
1
  ASSERT_OK(db_->CompactRange(compact_options, &first_slice, &last_slice));
247
  // 1 sst after compaction
248
1
  CheckFileTypeCounts(dbname_, 0, 1, 1);
249
250
  // this time, we keep an iterator alive
251
1
  ASSERT_OK(ReopenDB(true));
252
1
  Iterator* itr = nullptr;
253
1
  CreateTwoLevels();
254
1
  itr = db_->NewIterator(ReadOptions());
255
1
  ASSERT_OK(db_->CompactRange(compact_options, &first_slice, &last_slice));
256
  // 3 sst after compaction with live iterator
257
1
  CheckFileTypeCounts(dbname_, 0, 3, 1);
258
1
  delete itr;
259
  // 1 sst after iterator deletion
260
1
  CheckFileTypeCounts(dbname_, 0, 1, 1);
261
262
1
  CloseDB();
263
1
}
264
265
1
TEST_F(DeleteFileTest, DeleteFileWithIterator) {
266
1
  CreateTwoLevels();
267
1
  ReadOptions options;
268
1
  Iterator* it = db_->NewIterator(options);
269
1
  std::vector<LiveFileMetaData> metadata;
270
1
  db_->GetLiveFilesMetaData(&metadata);
271
272
1
  std::string level2file = "";
273
274
1
  ASSERT_EQ((int)metadata.size(), 2);
275
1
  if (metadata[0].level == 1) {
276
1
    level2file = metadata[1].name;
277
0
  } else {
278
0
    level2file = metadata[0].name;
279
0
  }
280
281
1
  Status status = db_->DeleteFile(level2file);
282
1
  fprintf(stdout, "Deletion status %s: %s\n",
283
1
          level2file.c_str(), status.ToString().c_str());
284
1
  ASSERT_TRUE(status.ok());
285
1
  it->SeekToFirst();
286
1
  int numKeysIterated = 0;
287
50.0k
  while (it->Valid()) {
288
50.0k
    numKeysIterated++;
289
50.0k
    it->Next();
290
50.0k
  }
291
1
  ASSERT_EQ(numKeysIterated, 50000);
292
1
  delete it;
293
1
  CloseDB();
294
1
}
295
296
1
TEST_F(DeleteFileTest, DeleteLogFiles) {
297
1
  AddKeys(10, 0);
298
1
  VectorLogPtr logfiles;
299
1
  ASSERT_OK(db_->GetSortedWalFiles(&logfiles));
300
1
  ASSERT_GT(logfiles.size(), 0UL);
301
  // Take the last log file, which is expected to be alive, and try to delete it.
303
  // This should not succeed, because live log files are not allowed to be deleted.
303
1
  std::unique_ptr<LogFile> alive_log = std::move(logfiles.back());
304
1
  ASSERT_EQ(alive_log->Type(), kAliveLogFile);
305
1
  ASSERT_OK(env_->FileExists(options_.wal_dir + "/" + alive_log->PathName()));
306
1
  fprintf(stdout, "Deleting alive log file %s\n",
307
1
          alive_log->PathName().c_str());
308
1
  ASSERT_TRUE(!db_->DeleteFile(alive_log->PathName()).ok());
309
1
  ASSERT_OK(env_->FileExists(options_.wal_dir + "/" + alive_log->PathName()));
310
1
  logfiles.clear();
311
312
  // Call Flush to start a new working log file, then add more keys.
313
  // Call Flush again to flush out the memtable and move the previous live log to the
314
  // archive, then try to delete that archived log file.
315
1
  FlushOptions fopts;
316
1
  ASSERT_OK(db_->Flush(fopts));
317
1
  AddKeys(10, 0);
318
1
  ASSERT_OK(db_->Flush(fopts));
319
1
  ASSERT_OK(db_->GetSortedWalFiles(&logfiles));
320
1
  ASSERT_GT(logfiles.size(), 0UL);
321
1
  std::unique_ptr<LogFile> archived_log = std::move(logfiles.front());
322
1
  ASSERT_EQ(archived_log->Type(), kArchivedLogFile);
323
1
  ASSERT_OK(
324
1
      env_->FileExists(options_.wal_dir + "/" + archived_log->PathName()));
325
1
  fprintf(stdout, "Deleting archived log file %s\n",
326
1
          archived_log->PathName().c_str());
327
1
  ASSERT_OK(db_->DeleteFile(archived_log->PathName()));
328
1
  ASSERT_TRUE(env_->FileExists(options_.wal_dir + "/" + archived_log->PathName()).IsNotFound());
329
1
  CloseDB();
330
1
}
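The WAL half of the API mirrors the SST rules exercised above: live log files are protected, while archived ones (kept around here because WAL_ttl_seconds is nonzero) may be deleted by name. A small sketch of that pattern, with the helper name assumed for illustration:

    #include "yb/rocksdb/db.h"
    #include "yb/rocksdb/transaction_log.h"

    // Illustrative sketch: delete every archived WAL file, skipping live ones.
    rocksdb::Status DeleteArchivedWals(rocksdb::DB* db) {
      rocksdb::VectorLogPtr wal_files;
      rocksdb::Status s = db->GetSortedWalFiles(&wal_files);
      if (!s.ok()) {
        return s;
      }
      for (const auto& wal : wal_files) {
        if (wal->Type() == rocksdb::kArchivedLogFile) {
          s = db->DeleteFile(wal->PathName());
          if (!s.ok()) {
            return s;
          }
        }
      }
      return rocksdb::Status::OK();
    }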
331
332
1
TEST_F(DeleteFileTest, DeleteNonDefaultColumnFamily) {
333
1
  CloseDB();
334
1
  DBOptions db_options;
335
1
  db_options.create_if_missing = true;
336
1
  db_options.create_missing_column_families = true;
337
1
  std::vector<ColumnFamilyDescriptor> column_families;
338
1
  column_families.emplace_back();
339
1
  column_families.emplace_back("new_cf", ColumnFamilyOptions());
340
341
1
  std::vector<rocksdb::ColumnFamilyHandle*> handles;
342
1
  rocksdb::DB* db;
343
1
  ASSERT_OK(DB::Open(db_options, dbname_, column_families, &handles, &db));
344
345
1
  Random rnd(5);
346
1.00k
  for (int i = 0; i < 1000; ++i) {
347
1.00k
    ASSERT_OK(db->Put(WriteOptions(), handles[1], test::RandomKey(&rnd, 10),
348
1.00k
                      test::RandomKey(&rnd, 10)));
349
1.00k
  }
350
1
  ASSERT_OK(db->Flush(FlushOptions(), handles[1]));
351
1.00k
  for (int i = 0; i < 1000; ++i) {
352
1.00k
    ASSERT_OK(db->Put(WriteOptions(), handles[1], test::RandomKey(&rnd, 10),
353
1.00k
                      test::RandomKey(&rnd, 10)));
354
1.00k
  }
355
1
  ASSERT_OK(db->Flush(FlushOptions(), handles[1]));
356
357
1
  std::vector<LiveFileMetaData> metadata;
358
1
  db->GetLiveFilesMetaData(&metadata);
359
1
  ASSERT_EQ(2U, metadata.size());
360
1
  ASSERT_EQ("new_cf", metadata[0].column_family_name);
361
1
  ASSERT_EQ("new_cf", metadata[1].column_family_name);
362
1
  auto old_file = metadata[0].smallest.seqno < metadata[1].smallest.seqno
363
0
                      ? metadata[0].name
364
1
                      : metadata[1].name;
365
1
  auto new_file = metadata[0].smallest.seqno > metadata[1].smallest.seqno
366
1
                      ? metadata[0].name
367
0
                      : metadata[1].name;
368
1
  ASSERT_TRUE(db->DeleteFile(new_file).IsInvalidArgument());
369
1
  ASSERT_OK(db->DeleteFile(old_file));
370
371
1
  {
372
1
    std::unique_ptr<Iterator> itr(db->NewIterator(ReadOptions(), handles[1]));
373
1
    int count = 0;
374
1.00k
    for (itr->SeekToFirst(); itr->Valid(); itr->Next()) {
375
1.00k
      ASSERT_OK(itr->status());
376
1.00k
      ++count;
377
1.00k
    }
378
1
    ASSERT_EQ(count, 1000);
379
1
  }
380
381
1
  delete handles[0];
382
1
  delete handles[1];
383
1
  delete db;
384
385
1
  ASSERT_OK(DB::Open(db_options, dbname_, column_families, &handles, &db));
386
1
  {
387
1
    std::unique_ptr<Iterator> itr(db->NewIterator(ReadOptions(), handles[1]));
388
1
    int count = 0;
389
1.00k
    for (itr->SeekToFirst(); itr->Valid(); itr->Next()) {
390
1.00k
      ASSERT_OK(itr->status());
391
1.00k
      ++count;
392
1.00k
    }
393
1
    ASSERT_EQ(count, 1000);
394
1
  }
395
396
1
  delete handles[0];
397
1
  delete handles[1];
398
1
  delete db;
399
1
}
400
401
Result<std::vector<LiveFileMetaData>> DeleteFileTest::AddFiles(
402
6
    const int num_sst_files, const int num_key_per_sst) {
403
6
  LOG(INFO) << "Writing " << num_sst_files << " SSTs";
404
18
  for (auto num = 0; num < num_sst_files; num++) {
405
12
    AddKeys(num_key_per_sst, 0);
406
12
    RETURN_NOT_OK(FlushSync());
407
12
  }
408
6
  std::vector<LiveFileMetaData> metadata;
409
6
  db_->GetLiveFilesMetaData(&metadata);
410
6
  return metadata;
411
6
}
412
413
size_t DeleteFileTest::TryDeleteFiles(
414
    const std::vector<LiveFileMetaData>& files, const size_t max_files_to_delete,
415
6
    const StopOnMaxFilesDeleted stop_on_max_files_deleted, boost::function<bool()> stop_condition) {
416
6
  size_t files_deleted = 0;
417
6
  LOG(INFO) << "Starting file deletion loop";
418
16
  while (!stop_condition()) {
419
23
    for (auto& file : files) {
420
23
      if (files_deleted >= max_files_to_delete) {
421
11
        if (stop_on_max_files_deleted) {
422
1
          return files_deleted;
423
1
        }
424
        // Just wait for stop condition.
425
10
        std::this_thread::sleep_for(10ms);
426
10
        continue;
427
10
      }
428
12
      if (db_->DeleteFile(file.name).ok()) {
429
6
        const auto file_path = file.db_path + file.name;
430
431
6
        std::vector<LiveFileMetaData> current_files;
432
6
        dbfull()->GetLiveFilesMetaData(&current_files);
433
6
        auto it = std::find_if(
434
6
            current_files.begin(), current_files.end(),
435
6
            [&](const auto& current_file) { return current_file.name == file.name; });
436
6
        if (it == current_files.end()) {
437
6
          LOG(INFO) << "Deleted file: " << file_path;
438
6
          ++files_deleted;
439
6
        }
440
6
      }
441
12
    }
442
11
  }
443
5
  return files_deleted;
444
6
}
445
446
namespace compaction_race {
447
448
constexpr auto kMaxNumSstFiles = 3;
449
constexpr auto kNumKeysPerSst = 10000;
450
451
} // namespace compaction_race
452
453
1
TEST_F(DeleteFileTest, DeleteWithManualCompaction) {
454
1
  rocksdb::SyncPoint::GetInstance()->LoadDependency({
455
1
      {"DBImpl::DeleteFile:DecidedToDelete", "DBImpl::RunManualCompaction"}
456
1
  });
457
458
4
  for (int num_sst_files = 1; num_sst_files <= compaction_race::kMaxNumSstFiles; ++num_sst_files) {
459
3
    ASSERT_OK(ReopenDB(/* create = */ true));
460
461
3
    auto metadata = ASSERT_RESULT(AddFiles(num_sst_files, compaction_race::kNumKeysPerSst));
462
463
3
    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
464
465
3
    std::atomic<bool> manual_compaction_done{false};
466
3
    Status manual_compaction_status;
467
468
3
    std::thread manual_compaction([this, &manual_compaction_done, &manual_compaction_status] {
469
3
      manual_compaction_status = db_->CompactRange(
470
3
          rocksdb::CompactRangeOptions(), /* begin = */ nullptr, /* end = */ nullptr);
471
3
      manual_compaction_done = true;
472
3
    });
473
474
3
    auto files_deleted = TryDeleteFiles(
475
3
        metadata, /* max_files_to_delete = */ 1, StopOnMaxFilesDeleted::kFalse,
476
8
        [&manual_compaction_done] {
477
8
          return manual_compaction_done.load(std::memory_order_acquire);
478
8
        });
479
480
3
    manual_compaction.join();
481
3
    ASSERT_OK(manual_compaction_status);
482
3
    ASSERT_EQ(files_deleted, 1);
483
484
3
    CloseDB();
485
486
3
    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
487
3
    rocksdb::SyncPoint::GetInstance()->ClearTrace();
488
3
  }
489
1
}
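Both race tests rely on the test-only SyncPoint machinery to force an ordering between the file deletion and the competing compaction. A minimal illustration of the pattern; the marker names and the header path are assumptions for the sketch, not markers the production code defines:

    #include "yb/rocksdb/util/sync_point.h"

    // Illustrative pattern: any thread reaching "Marker:B" waits until some
    // other thread has passed "Marker:A".
    void OrderAcrossThreads() {
      rocksdb::SyncPoint::GetInstance()->LoadDependency({
          {"Marker:A", "Marker:B"},
      });
      rocksdb::SyncPoint::GetInstance()->EnableProcessing();
      // ... run the threads under test, which hit the named sync points ...
      rocksdb::SyncPoint::GetInstance()->DisableProcessing();
      rocksdb::SyncPoint::GetInstance()->ClearTrace();
    }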
490
491
1
TEST_F(DeleteFileTest, DeleteWithBackgroundCompaction) {
492
1
  rocksdb::SyncPoint::GetInstance()->LoadDependency({
493
1
      {"DBImpl::DeleteFile:DecidedToDelete", "DBImpl::EnableAutoCompaction"},
494
1
      {"DBImpl::SchedulePendingCompaction:Done", "VersionSet::LogAndApply:WriteManifest"},
495
1
  });
496
497
4
  for (int num_sst_files = 1; num_sst_files <= compaction_race::kMaxNumSstFiles; ++num_sst_files) {
498
3
    auto listener = std::make_shared<CompactionStartedListener>();
499
3
    options_.level0_file_num_compaction_trigger = 2;
500
3
    options_.disable_auto_compactions = true;
501
3
    options_.listeners.push_back(listener);
502
3
    ASSERT_OK(ReopenDB(/* create = */ true));
503
504
3
    auto metadata = ASSERT_RESULT(AddFiles(num_sst_files, compaction_race::kNumKeysPerSst));
505
506
3
    const bool expect_compaction = num_sst_files > 1;
507
508
3
    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
509
510
3
    std::atomic<bool> pending_compaction{false};
511
3
    std::thread enable_compactions([this, &pending_compaction] {
512
3
      ColumnFamilyData* cfd =
513
3
          static_cast<ColumnFamilyHandleImpl*>(db_->DefaultColumnFamily())->cfd();
514
3
      ASSERT_OK(db_->EnableAutoCompaction({db_->DefaultColumnFamily()}));
515
3
      {
516
3
        dbfull()->TEST_LockMutex();
517
3
        pending_compaction = cfd->pending_compaction();
518
3
        dbfull()->TEST_UnlockMutex();
519
3
      }
520
3
      LOG(INFO) << "pending_compaction: " << pending_compaction;
521
3
    });
522
523
3
    auto files_deleted = TryDeleteFiles(
524
3
        metadata, /* max_files_to_delete = */ 1, StopOnMaxFilesDeleted(!expect_compaction),
525
8
        [this, &listener] {
526
          // Stop after compaction has been started and finished.
527
8
          return listener->GetNumCompactionsStarted() > 0 &&
528
4
                 dbfull()->TEST_NumTotalRunningCompactions() == 0;
529
8
        });
530
531
3
    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
532
3
    rocksdb::SyncPoint::GetInstance()->ClearTrace();
533
534
3
    enable_compactions.join();
535
3
    EXPECT_EQ(files_deleted, 1);
536
3
    if (!expect_compaction) {
537
1
      EXPECT_FALSE(pending_compaction);
538
1
      EXPECT_EQ(listener->GetNumCompactionsStarted(), 0);
539
1
    }
540
541
3
    ASSERT_NO_FATALS(AddKeys(10, 0));
542
3
    ASSERT_OK(FlushSync());
543
544
3
    CloseDB();
545
3
  }
546
1
}
547
548
} // namespace rocksdb
549
550
13.2k
int main(int argc, char** argv) {
551
13.2k
  ::testing::InitGoogleTest(&argc, argv);
552
13.2k
  return RUN_ALL_TESTS();
553
13.2k
}
554
555
#else
556
#include <stdio.h>
557
558
int main(int argc, char** argv) {
559
  fprintf(stderr,
560
          "SKIPPED as DBImpl::DeleteFile is not supported in ROCKSDB_LITE\n");
561
  return 0;
562
}
563
564
#endif  // !ROCKSDB_LITE