YugabyteDB (2.13.0.0-b42, bfc6a6643e7399ac8a0e81d06a3ee6d6571b33ab)

Coverage Report

Created: 2022-03-09 17:30

/Users/deen/code/yugabyte-db/src/yb/rocksdb/db/listener_test.cc
Line
Count
Source (jump to first uncovered line)
1
//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
2
//  This source code is licensed under the BSD-style license found in the
3
//  LICENSE file in the root directory of this source tree. An additional grant
4
//  of patent rights can be found in the PATENTS file in the same directory.
5
//
6
// The following only applies to changes made to this file as part of YugaByte development.
7
//
8
// Portions Copyright (c) YugaByte, Inc.
9
//
10
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
11
// in compliance with the License.  You may obtain a copy of the License at
12
//
13
// http://www.apache.org/licenses/LICENSE-2.0
14
//
15
// Unless required by applicable law or agreed to in writing, software distributed under the License
16
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
17
// or implied.  See the License for the specific language governing permissions and limitations
18
// under the License.
19
//
20
21
#include "yb/rocksdb/db/db_test_util.h"
22
#include "yb/rocksdb/util/sync_point.h"
23
24
#ifndef ROCKSDB_LITE
25
26
namespace rocksdb {
27
28
class EventListenerTest : public DBTestBase {
29
 public:
30
8
  EventListenerTest() : DBTestBase("/listener_test") {}
31
32
  const size_t k110KB = 110 << 10;
33
};
34
35
struct TestPropertiesCollector : public rocksdb::TablePropertiesCollector {
36
  virtual rocksdb::Status AddUserKey(const rocksdb::Slice& key,
37
                                     const rocksdb::Slice& value,
38
                                     rocksdb::EntryType type,
39
                                     rocksdb::SequenceNumber seq,
40
106
                                     uint64_t file_size) override {
41
106
    return Status::OK();
42
106
  }
43
  virtual rocksdb::Status Finish(
44
212
      rocksdb::UserCollectedProperties* properties) override {
45
212
    properties->insert({"0", "1"});
46
212
    return Status::OK();
47
212
  }
48
49
0
  const char* Name() const override {
50
0
    return "TestTablePropertiesCollector";
51
0
  }
52
53
106
  rocksdb::UserCollectedProperties GetReadableProperties() const override {
54
106
    rocksdb::UserCollectedProperties ret;
55
106
    ret["2"] = "3";
56
106
    return ret;
57
106
  }
58
};
59
60
class TestPropertiesCollectorFactory : public TablePropertiesCollectorFactory {
61
 public:
62
  virtual TablePropertiesCollector* CreateTablePropertiesCollector(
63
106
      TablePropertiesCollectorFactory::Context context) override {
64
106
    return new TestPropertiesCollector;
65
106
  }
66
88
  const char* Name() const override { return "TestTablePropertiesCollector"; }
67
};
68
69
class TestCompactionListener : public EventListener {
70
 public:
71
7
  void OnCompactionCompleted(DB *db, const CompactionJobInfo& ci) override {
72
7
    std::lock_guard<std::mutex> lock(mutex_);
73
7
    compacted_dbs_.push_back(db);
74
7
    ASSERT_GT(ci.input_files.size(), 0U);
75
7
    ASSERT_GT(ci.output_files.size(), 0U);
76
7
    ASSERT_EQ(db->GetEnv()->GetThreadID(), ci.thread_id);
77
7
    ASSERT_GT(ci.thread_id, 0U);
78
79
14
    for (auto fl : {ci.input_files, ci.output_files}) {
80
14
      for (auto fn : fl) {
81
14
        auto it = ci.table_properties.find(fn);
82
14
        ASSERT_NE(it, ci.table_properties.end());
83
14
        auto tp = it->second;
84
14
        ASSERT_TRUE(tp != nullptr);
85
14
        ASSERT_EQ(tp->user_collected_properties.find("0")->second, "1");
86
14
      }
87
14
    }
88
7
  }
89
90
  std::vector<DB*> compacted_dbs_;
91
  std::mutex mutex_;
92
};
93
94
1
TEST_F(EventListenerTest, OnSingleDBCompactionTest) {
95
1
  const int kTestKeySize = 16;
96
1
  const int kTestValueSize = 984;
97
1
  const int kEntrySize = kTestKeySize + kTestValueSize;
98
1
  const int kEntriesPerBuffer = 100;
99
1
  const int kNumL0Files = 4;
100
101
1
  Options options;
102
1
  options.create_if_missing = true;
103
1
  options.write_buffer_size = kEntrySize * kEntriesPerBuffer;
104
1
  options.compaction_style = kCompactionStyleLevel;
105
1
  options.target_file_size_base = options.write_buffer_size;
106
1
  options.max_bytes_for_level_base = options.target_file_size_base * 2;
107
1
  options.max_bytes_for_level_multiplier = 2;
108
1
  options.compression = kNoCompression;
109
1
  options.level0_file_num_compaction_trigger = kNumL0Files;
110
1
  options.table_properties_collector_factories.push_back(
111
1
      std::make_shared<TestPropertiesCollectorFactory>());
112
113
1
  TestCompactionListener* listener = new TestCompactionListener();
114
1
  options.listeners.emplace_back(listener);
115
1
  std::vector<std::string> cf_names = {
116
1
      "pikachu", "ilya", "muromec", "dobrynia",
117
1
      "nikitich", "alyosha", "popovich"};
118
1
  CreateAndReopenWithCF(cf_names, options);
119
1
  ASSERT_OK(Put(1, "pikachu", std::string(90000, 'p')));
120
1
  ASSERT_OK(Put(2, "ilya", std::string(90000, 'i')));
121
1
  ASSERT_OK(Put(3, "muromec", std::string(90000, 'm')));
122
1
  ASSERT_OK(Put(4, "dobrynia", std::string(90000, 'd')));
123
1
  ASSERT_OK(Put(5, "nikitich", std::string(90000, 'n')));
124
1
  ASSERT_OK(Put(6, "alyosha", std::string(90000, 'a')));
125
1
  ASSERT_OK(Put(7, "popovich", std::string(90000, 'p')));
126
8
  for (int i = 1; i < 8; ++i) {
127
7
    ASSERT_OK(Flush(i));
128
7
    const Slice kRangeStart = "a";
129
7
    const Slice kRangeEnd = "z";
130
7
    ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), handles_[i],
131
7
                                     &kRangeStart, &kRangeEnd));
132
7
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
133
7
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
134
7
  }
135
136
1
  ASSERT_EQ(listener->compacted_dbs_.size(), cf_names.size());
137
8
  for (size_t i = 0; i < cf_names.size(); ++i) {
138
7
    ASSERT_EQ(listener->compacted_dbs_[i], db_);
139
7
  }
140
1
}
141
142
// This simple Listener can only handle one flush at a time.
143
class TestFlushListener : public EventListener {
144
 public:
145
  explicit TestFlushListener(Env* env)
146
13
      : slowdown_count(0), stop_count(0), db_closed(), env_(env) {
147
13
    db_closed = false;
148
13
  }
149
  void OnTableFileCreated(
150
414
      const TableFileCreationInfo& info) override {
151
    // remember the info for later checking the FlushJobInfo.
152
414
    prev_fc_info_ = info;
153
414
    ASSERT_GT(info.db_name.size(), 0U);
154
414
    ASSERT_GT(info.cf_name.size(), 0U);
155
414
    ASSERT_GT(info.file_path.size(), 0U);
156
414
    ASSERT_GT(info.job_id, 0);
157
414
    ASSERT_GT(info.table_properties.data_size, 0U);
158
414
    ASSERT_GT(info.table_properties.raw_key_size, 0U);
159
414
    ASSERT_GT(info.table_properties.raw_value_size, 0U);
160
414
    ASSERT_GT(info.table_properties.num_data_blocks, 0U);
161
414
    ASSERT_GT(info.table_properties.num_entries, 0U);
162
414
  }
163
164
  void OnFlushCompleted(
165
414
      DB* db, const FlushJobInfo& info) override {
166
414
    flushed_dbs_.push_back(db);
167
414
    flushed_column_family_names_.push_back(info.cf_name);
168
414
    if (info.triggered_writes_slowdown) {
169
46
      slowdown_count++;
170
46
    }
171
414
    if (info.triggered_writes_stop) {
172
0
      stop_count++;
173
0
    }
174
    // verify whether the previously created file matches the flushed file.
175
414
    ASSERT_EQ(prev_fc_info_.db_name, db->GetName());
176
414
    ASSERT_EQ(prev_fc_info_.cf_name, info.cf_name);
177
414
    ASSERT_EQ(prev_fc_info_.job_id, info.job_id);
178
414
    ASSERT_EQ(prev_fc_info_.file_path, info.file_path);
179
414
    ASSERT_EQ(db->GetEnv()->GetThreadID(), info.thread_id);
180
414
    ASSERT_GT(info.thread_id, 0U);
181
414
    ASSERT_EQ(info.table_properties.user_collected_properties.find("0")->second,
182
414
              "1");
183
414
  }
184
185
  std::vector<std::string> flushed_column_family_names_;
186
  std::vector<DB*> flushed_dbs_;
187
  int slowdown_count;
188
  int stop_count;
189
  bool db_closing;
190
  std::atomic_bool db_closed;
191
  TableFileCreationInfo prev_fc_info_;
192
193
 protected:
194
  Env* env_;
195
};
196
197
1
TEST_F(EventListenerTest, OnSingleDBFlushTest) {
198
1
  Options options;
199
1
  options.write_buffer_size = k110KB;
200
1
  TestFlushListener* listener = new TestFlushListener(options.env);
201
1
  options.listeners.emplace_back(listener);
202
1
  std::vector<std::string> cf_names = {
203
1
      "pikachu", "ilya", "muromec", "dobrynia",
204
1
      "nikitich", "alyosha", "popovich"};
205
1
  options.table_properties_collector_factories.push_back(
206
1
      std::make_shared<TestPropertiesCollectorFactory>());
207
1
  CreateAndReopenWithCF(cf_names, options);
208
209
1
  ASSERT_OK(Put(1, "pikachu", std::string(90000, 'p')));
210
1
  ASSERT_OK(Put(2, "ilya", std::string(90000, 'i')));
211
1
  ASSERT_OK(Put(3, "muromec", std::string(90000, 'm')));
212
1
  ASSERT_OK(Put(4, "dobrynia", std::string(90000, 'd')));
213
1
  ASSERT_OK(Put(5, "nikitich", std::string(90000, 'n')));
214
1
  ASSERT_OK(Put(6, "alyosha", std::string(90000, 'a')));
215
1
  ASSERT_OK(Put(7, "popovich", std::string(90000, 'p')));
216
8
  for (int i = 1; i < 8; ++i) {
217
7
    ASSERT_OK(Flush(i));
218
7
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
219
7
    ASSERT_EQ(listener->flushed_dbs_.size(), i);
220
7
    ASSERT_EQ(listener->flushed_column_family_names_.size(), i);
221
7
  }
222
223
  // make sure call-back functions are called in the right order
224
8
  for (size_t i = 0; i < cf_names.size(); ++i) {
225
7
    ASSERT_EQ(listener->flushed_dbs_[i], db_);
226
7
    ASSERT_EQ(listener->flushed_column_family_names_[i], cf_names[i]);
227
7
  }
228
1
}
229
230
1
TEST_F(EventListenerTest, MultiCF) {
231
1
  Options options;
232
1
  options.write_buffer_size = k110KB;
233
1
  TestFlushListener* listener = new TestFlushListener(options.env);
234
1
  options.listeners.emplace_back(listener);
235
1
  options.table_properties_collector_factories.push_back(
236
1
      std::make_shared<TestPropertiesCollectorFactory>());
237
1
  std::vector<std::string> cf_names = {
238
1
      "pikachu", "ilya", "muromec", "dobrynia",
239
1
      "nikitich", "alyosha", "popovich"};
240
1
  CreateAndReopenWithCF(cf_names, options);
241
242
1
  ASSERT_OK(Put(1, "pikachu", std::string(90000, 'p')));
243
1
  ASSERT_OK(Put(2, "ilya", std::string(90000, 'i')));
244
1
  ASSERT_OK(Put(3, "muromec", std::string(90000, 'm')));
245
1
  ASSERT_OK(Put(4, "dobrynia", std::string(90000, 'd')));
246
1
  ASSERT_OK(Put(5, "nikitich", std::string(90000, 'n')));
247
1
  ASSERT_OK(Put(6, "alyosha", std::string(90000, 'a')));
248
1
  ASSERT_OK(Put(7, "popovich", std::string(90000, 'p')));
249
8
  for (int i = 1; i < 8; ++i) {
250
7
    ASSERT_OK(Flush(i));
251
7
    ASSERT_EQ(listener->flushed_dbs_.size(), i);
252
7
    ASSERT_EQ(listener->flushed_column_family_names_.size(), i);
253
7
  }
254
255
  // make sure call-back functions are called in the right order
256
8
  for (size_t i = 0; i < cf_names.size(); i++) {
257
7
    ASSERT_EQ(listener->flushed_dbs_[i], db_);
258
7
    ASSERT_EQ(listener->flushed_column_family_names_[i], cf_names[i]);
259
7
  }
260
1
}
261
262
1
TEST_F(EventListenerTest, MultiDBMultiListeners) {
263
1
  Options options;
264
1
  options.table_properties_collector_factories.push_back(
265
1
      std::make_shared<TestPropertiesCollectorFactory>());
266
1
  std::vector<TestFlushListener*> listeners;
267
1
  const int kNumDBs = 5;
268
1
  const int kNumListeners = 10;
269
11
  for (int i = 0; i < kNumListeners; ++i) {
270
10
    listeners.emplace_back(new TestFlushListener(options.env));
271
10
  }
272
273
1
  std::vector<std::string> cf_names = {
274
1
      "pikachu", "ilya", "muromec", "dobrynia",
275
1
      "nikitich", "alyosha", "popovich"};
276
277
1
  options.create_if_missing = true;
278
11
  for (int i = 0; i < kNumListeners; ++i) {
279
10
    options.listeners.emplace_back(listeners[i]);
280
10
  }
281
1
  DBOptions db_opts(options);
282
1
  ColumnFamilyOptions cf_opts(options);
283
284
1
  std::vector<DB*> dbs;
285
1
  std::vector<std::vector<ColumnFamilyHandle *>> vec_handles;
286
287
6
  for (int d = 0; d < kNumDBs; ++d) {
288
5
    ASSERT_OK(DestroyDB(dbname_ + ToString(d), options));
289
5
    DB* db;
290
5
    std::vector<ColumnFamilyHandle*> handles;
291
5
    ASSERT_OK(DB::Open(options, dbname_ + ToString(d), &db));
292
40
    for (size_t c = 0; c < cf_names.size(); ++c) {
293
35
      ColumnFamilyHandle* handle;
294
35
      ASSERT_OK(db->CreateColumnFamily(cf_opts, cf_names[c], &handle));
295
35
      handles.push_back(handle);
296
35
    }
297
298
5
    vec_handles.push_back(std::move(handles));
299
5
    dbs.push_back(db);
300
5
  }
301
302
6
  for (int d = 0; d < kNumDBs; ++d) {
303
40
    for (size_t c = 0; c < cf_names.size(); ++c) {
304
35
      ASSERT_OK(dbs[d]->Put(WriteOptions(), vec_handles[d][c],
305
35
                cf_names[c], cf_names[c]));
306
35
    }
307
5
  }
308
309
8
  for (size_t c = 0; c < cf_names.size(); ++c) {
310
42
    for (int d = 0; d < kNumDBs; ++d) {
311
35
      ASSERT_OK(dbs[d]->Flush(FlushOptions(), vec_handles[d][c]));
312
35
      ASSERT_OK(reinterpret_cast<DBImpl*>(dbs[d])->TEST_WaitForFlushMemTable());
313
35
    }
314
7
  }
315
316
10
  for (auto* listener : listeners) {
317
10
    int pos = 0;
318
80
    for (size_t c = 0; c < cf_names.size(); ++c) {
319
420
      for (int d = 0; d < kNumDBs; ++d) {
320
350
        ASSERT_EQ(listener->flushed_dbs_[pos], dbs[d]);
321
350
        ASSERT_EQ(listener->flushed_column_family_names_[pos], cf_names[c]);
322
350
        pos++;
323
350
      }
324
70
    }
325
10
  }
326
327
328
5
  for (auto handles : vec_handles) {
329
35
    for (auto h : handles) {
330
35
      delete h;
331
35
    }
332
5
    handles.clear();
333
5
  }
334
1
  vec_handles.clear();
335
336
5
  for (auto db : dbs) {
337
5
    delete db;
338
5
  }
339
1
}
340
341
1
TEST_F(EventListenerTest, DisableBGCompaction) {
342
1
  Options options;
343
1
  TestFlushListener* listener = new TestFlushListener(options.env);
344
1
  const int kCompactionTrigger = 1;
345
1
  const int kSlowdownTrigger = 5;
346
1
  const int kStopTrigger = 100;
347
1
  options.level0_file_num_compaction_trigger = kCompactionTrigger;
348
1
  options.level0_slowdown_writes_trigger = kSlowdownTrigger;
349
1
  options.level0_stop_writes_trigger = kStopTrigger;
350
1
  options.max_write_buffer_number = 10;
351
1
  options.listeners.emplace_back(listener);
352
  // BG compaction is disabled.  Number of L0 files will simply keeps
353
  // increasing in this test.
354
1
  options.compaction_style = kCompactionStyleNone;
355
1
  options.compression = kNoCompression;
356
1
  options.write_buffer_size = 100000;  // Small write buffer
357
1
  options.table_properties_collector_factories.push_back(
358
1
      std::make_shared<TestPropertiesCollectorFactory>());
359
360
1
  CreateAndReopenWithCF({"pikachu"}, options);
361
1
  ColumnFamilyMetaData cf_meta;
362
1
  db_->GetColumnFamilyMetaData(handles_[1], &cf_meta);
363
364
  // keep writing until writes are forced to stop.
365
51
  for (int i = 0; static_cast<int>(cf_meta.file_count) < kSlowdownTrigger * 10;
366
50
       ++i) {
367
50
    ASSERT_OK(Put(1, ToString(i), std::string(10000, 'x'), WriteOptions()));
368
50
    ASSERT_OK(db_->Flush(FlushOptions(), handles_[1]));
369
50
    db_->GetColumnFamilyMetaData(handles_[1], &cf_meta);
370
50
  }
371
1
  ASSERT_GE(listener->slowdown_count, kSlowdownTrigger * 9);
372
1
}
373
374
class TestCompactionReasonListener : public EventListener {
375
 public:
376
21
  void OnCompactionCompleted(DB* db, const CompactionJobInfo& ci) override {
377
21
    std::lock_guard<std::mutex> lock(mutex_);
378
21
    compaction_reasons_.push_back(ci.compaction_reason);
379
21
  }
380
381
  std::vector<CompactionReason> compaction_reasons_;
382
  std::mutex mutex_;
383
};
384
385
1
TEST_F(EventListenerTest, CompactionReasonLevel) {
386
1
  Options options;
387
1
  options.create_if_missing = true;
388
1
  options.memtable_factory.reset(
389
1
      new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile));
390
391
1
  TestCompactionReasonListener* listener = new TestCompactionReasonListener();
392
1
  options.listeners.emplace_back(listener);
393
394
1
  options.level0_file_num_compaction_trigger = 4;
395
1
  options.compaction_style = kCompactionStyleLevel;
396
397
1
  DestroyAndReopen(options);
398
1
  Random rnd(301);
399
400
  // Write 4 files in L0
401
5
  for (int i = 0; i < 4; i++) {
402
4
    GenerateNewRandomFile(&rnd);
403
4
  }
404
1
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
405
406
1
  ASSERT_EQ(listener->compaction_reasons_.size(), 1);
407
1
  ASSERT_EQ(listener->compaction_reasons_[0],
408
1
            CompactionReason::kLevelL0FilesNum);
409
410
1
  DestroyAndReopen(options);
411
412
  // Write 3 non-overlapping files in L0
413
31
  for (int k = 1; k <= 30; k++) {
414
30
    ASSERT_OK(Put(Key(k), Key(k)));
415
30
    if (k % 10 == 0) {
416
3
      ASSERT_OK(Flush());
417
3
    }
418
30
  }
419
420
  // Do a trivial move from L0 -> L1
421
1
  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
422
423
1
  options.max_bytes_for_level_base = 1;
424
1
  Close();
425
1
  listener->compaction_reasons_.clear();
426
1
  Reopen(options);
427
428
1
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
429
1
  ASSERT_GT(listener->compaction_reasons_.size(), 1);
430
431
11
  for (auto compaction_reason : listener->compaction_reasons_) {
432
11
    ASSERT_EQ(compaction_reason, CompactionReason::kLevelMaxLevelSize);
433
11
  }
434
435
1
  options.disable_auto_compactions = true;
436
1
  Close();
437
1
  listener->compaction_reasons_.clear();
438
1
  Reopen(options);
439
440
1
  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
441
1
  ASSERT_GT(listener->compaction_reasons_.size(), 0);
442
1
  for (auto compaction_reason : listener->compaction_reasons_) {
443
1
    ASSERT_EQ(compaction_reason, CompactionReason::kManualCompaction);
444
1
  }
445
1
}
446
447
1
TEST_F(EventListenerTest, CompactionReasonUniversal) {
448
1
  Options options;
449
1
  options.create_if_missing = true;
450
1
  options.memtable_factory.reset(
451
1
      new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile));
452
453
1
  TestCompactionReasonListener* listener = new TestCompactionReasonListener();
454
1
  options.listeners.emplace_back(listener);
455
456
1
  options.compaction_style = kCompactionStyleUniversal;
457
458
1
  Random rnd(301);
459
460
1
  options.level0_file_num_compaction_trigger = 8;
461
1
  options.compaction_options_universal.max_size_amplification_percent = 100000;
462
1
  options.compaction_options_universal.size_ratio = 100000;
463
1
  DestroyAndReopen(options);
464
1
  listener->compaction_reasons_.clear();
465
466
  // Write 8 files in L0
467
9
  for (int i = 0; i < 8; i++) {
468
8
    GenerateNewRandomFile(&rnd);
469
8
  }
470
1
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
471
472
1
  ASSERT_GT(listener->compaction_reasons_.size(), 0);
473
1
  for (auto compaction_reason : listener->compaction_reasons_) {
474
1
    ASSERT_EQ(compaction_reason, CompactionReason::kUniversalSortedRunNum);
475
1
  }
476
477
1
  options.level0_file_num_compaction_trigger = 8;
478
1
  options.compaction_options_universal.max_size_amplification_percent = 1;
479
1
  options.compaction_options_universal.size_ratio = 100000;
480
481
1
  DestroyAndReopen(options);
482
1
  listener->compaction_reasons_.clear();
483
484
  // Write 8 files in L0
485
9
  for (int i = 0; i < 8; i++) {
486
8
    GenerateNewRandomFile(&rnd);
487
8
  }
488
1
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
489
490
1
  ASSERT_GT(listener->compaction_reasons_.size(), 0);
491
1
  for (auto compaction_reason : listener->compaction_reasons_) {
492
1
    ASSERT_EQ(compaction_reason, CompactionReason::kUniversalSizeAmplification);
493
1
  }
494
495
1
  options.disable_auto_compactions = true;
496
1
  Close();
497
1
  listener->compaction_reasons_.clear();
498
1
  Reopen(options);
499
500
1
  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
501
502
1
  ASSERT_GT(listener->compaction_reasons_.size(), 0);
503
1
  for (auto compaction_reason : listener->compaction_reasons_) {
504
1
    ASSERT_EQ(compaction_reason, CompactionReason::kManualCompaction);
505
1
  }
506
1
}
507
508
1
TEST_F(EventListenerTest, CompactionReasonFIFO) {
509
1
  Options options;
510
1
  options.create_if_missing = true;
511
1
  options.memtable_factory.reset(
512
1
      new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile));
513
514
1
  TestCompactionReasonListener* listener = new TestCompactionReasonListener();
515
1
  options.listeners.emplace_back(listener);
516
517
1
  options.level0_file_num_compaction_trigger = 4;
518
1
  options.compaction_style = kCompactionStyleFIFO;
519
1
  options.compaction_options_fifo.max_table_files_size = 1;
520
521
1
  DestroyAndReopen(options);
522
1
  Random rnd(301);
523
524
  // Write 4 files in L0
525
5
  for (int i = 0; i < 4; i++) {
526
4
    GenerateNewRandomFile(&rnd);
527
4
  }
528
1
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
529
530
1
  ASSERT_GT(listener->compaction_reasons_.size(), 0);
531
4
  for (auto compaction_reason : listener->compaction_reasons_) {
532
4
    ASSERT_EQ(compaction_reason, CompactionReason::kFIFOMaxSize);
533
4
  }
534
1
}
535
}  // namespace rocksdb
536
537
#endif  // ROCKSDB_LITE
538
539
13.2k
int main(int argc, char** argv) {
540
13.2k
  ::testing::InitGoogleTest(&argc, argv);
541
13.2k
  return RUN_ALL_TESTS();
542
13.2k
}