/Users/deen/code/yugabyte-db/src/yb/rocksdb/db/db_test_util.h
Line | Count | Source (jump to first uncovered line) |
1 | | // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. |
2 | | // This source code is licensed under the BSD-style license found in the |
3 | | // LICENSE file in the root directory of this source tree. An additional grant |
4 | | // of patent rights can be found in the PATENTS file in the same directory. |
5 | | // |
6 | | // The following only applies to changes made to this file as part of YugaByte development. |
7 | | // |
8 | | // Portions Copyright (c) YugaByte, Inc. |
9 | | // |
10 | | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except |
11 | | // in compliance with the License. You may obtain a copy of the License at |
12 | | // |
13 | | // http://www.apache.org/licenses/LICENSE-2.0 |
14 | | // |
15 | | // Unless required by applicable law or agreed to in writing, software distributed under the License |
16 | | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express |
17 | | // or implied. See the License for the specific language governing permissions and limitations |
18 | | // under the License. |
19 | | // |
20 | | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. |
21 | | // Use of this source code is governed by a BSD-style license that can be |
22 | | // found in the LICENSE file. See the AUTHORS file for names of contributors. |
23 | | #ifndef YB_ROCKSDB_DB_DB_TEST_UTIL_H |
24 | | #define YB_ROCKSDB_DB_DB_TEST_UTIL_H |
25 | | |
26 | | #pragma once |
27 | | #ifndef __STDC_FORMAT_MACROS |
28 | | #define __STDC_FORMAT_MACROS |
29 | | #endif |
30 | | |
31 | | #include <fcntl.h> |
32 | | #include <inttypes.h> |
33 | | #ifndef OS_WIN |
34 | | #include <unistd.h> |
35 | | #endif |
36 | | |
37 | | #include <algorithm> |
38 | | #include <map> |
39 | | #include <set> |
40 | | #include <string> |
41 | | #include <thread> |
42 | | #include <unordered_set> |
43 | | #include <utility> |
44 | | #include <vector> |
45 | | |
46 | | #include <gtest/gtest.h> |
47 | | |
48 | | #include "yb/encryption/encryption_fwd.h" |
49 | | |
50 | | #include "yb/rocksdb/db/db_impl.h" |
51 | | #include "yb/rocksdb/db/dbformat.h" |
52 | | #include "yb/rocksdb/db/filename.h" |
53 | | #include "yb/rocksdb/memtable/hash_linklist_rep.h" |
54 | | #include "yb/rocksdb/cache.h" |
55 | | #include "yb/rocksdb/compaction_filter.h" |
56 | | #include "yb/rocksdb/convenience.h" |
57 | | #include "yb/rocksdb/db.h" |
58 | | #include "yb/rocksdb/env.h" |
59 | | #include "yb/rocksdb/filter_policy.h" |
60 | | #include "yb/rocksdb/options.h" |
61 | | #include "yb/util/slice.h" |
62 | | #include "yb/rocksdb/table.h" |
63 | | #include "yb/rocksdb/utilities/checkpoint.h" |
64 | | #include "yb/rocksdb/table/block_based_table_factory.h" |
65 | | #include "yb/rocksdb/table/mock_table.h" |
66 | | #include "yb/rocksdb/table/plain_table_factory.h" |
67 | | #include "yb/rocksdb/table/scoped_arena_iterator.h" |
68 | | #include "yb/rocksdb/util/compression.h" |
69 | | #include "yb/rocksdb/util/mock_env.h" |
70 | | #include "yb/rocksdb/util/mutexlock.h" |
71 | | |
72 | | #include "yb/util/string_util.h" |
73 | | // SyncPoint is not supported in Released Windows Mode. |
74 | | #if !(defined NDEBUG) || !defined(OS_WIN) |
75 | | #include "yb/rocksdb/util/sync_point.h" |
76 | | #endif // !(defined NDEBUG) || !defined(OS_WIN) |
77 | | #include "yb/rocksdb/util/testharness.h" |
78 | | #include "yb/rocksdb/util/testutil.h" |
79 | | #include "yb/rocksdb/util/xfunc.h" |
80 | | #include "yb/rocksdb/utilities/merge_operators.h" |
81 | | |
82 | | #include "yb/util/test_util.h" // For ASSERT_OK |
83 | | |
84 | | namespace rocksdb { |
85 | | |
86 | 1.49k | uint64_t TestGetTickerCount(const Options& options, Tickers ticker_type) { |
87 | 1.49k | return options.statistics->getTickerCount(ticker_type); |
88 | 1.49k | } |
89 | | |
90 | 10 | void TestResetTickerCount(const Options& options, Tickers ticker_type) { |
91 | 10 | return options.statistics->setTickerCount(ticker_type, 0); |
92 | 10 | } |
93 | | |
94 | | // Update options for RocksDB to be redirected to GLOG via YBRocksDBLogger. |
95 | | void ConfigureLoggingToGlog(Options* options, const std::string& log_prefix = "TEST: "); |
96 | | |
97 | | class OnFileDeletionListener : public EventListener { |
98 | | public: |
99 | | OnFileDeletionListener() : |
100 | | matched_count_(0), |
101 | | expected_file_name_("") {} |
102 | | |
103 | | void SetExpectedFileName( |
104 | | const std::string file_name) { |
105 | | expected_file_name_ = file_name; |
106 | | } |
107 | | |
108 | | void VerifyMatchedCount(size_t expected_value) { |
109 | | ASSERT_EQ(matched_count_, expected_value); |
110 | | } |
111 | | |
112 | | void OnTableFileDeleted( |
113 | | const TableFileDeletionInfo& info) override { |
114 | | if (expected_file_name_ != "") { |
115 | | ASSERT_EQ(expected_file_name_, info.file_path); |
116 | | expected_file_name_ = ""; |
117 | | matched_count_++; |
118 | | } |
119 | | } |
120 | | |
121 | | private: |
122 | | size_t matched_count_; |
123 | | std::string expected_file_name_; |
124 | | }; |
125 | | |
126 | | class CompactionStartedListener : public EventListener { |
127 | | public: |
128 | | void OnCompactionStarted() override { |
129 | | ++num_compactions_started_; |
130 | | } |
131 | | |
132 | | int GetNumCompactionsStarted() { return num_compactions_started_; } |
133 | | |
134 | | private: |
135 | | std::atomic<int> num_compactions_started_; |
136 | | }; |
137 | | |
138 | | namespace anon { |
139 | | class AtomicCounter { |
140 | | public: |
141 | | explicit AtomicCounter(Env* env = NULL) |
142 | 1.65k | : env_(env), cond_count_(&mu_), count_(0) {} |
143 | | |
144 | 112k | void Increment() { |
145 | 112k | MutexLock l(&mu_); |
146 | 112k | count_++; |
147 | 112k | cond_count_.SignalAll(); |
148 | 112k | } |
149 | | |
150 | | int Read() { |
151 | | MutexLock l(&mu_); |
152 | | return count_; |
153 | | } |
154 | | |
155 | 0 | bool WaitFor(int count) { |
156 | 0 | MutexLock l(&mu_); |
157 | 0 |
|
158 | 0 | uint64_t start = env_->NowMicros(); |
159 | 0 | while (count_ < count) { |
160 | 0 | uint64_t now = env_->NowMicros(); |
161 | 0 | cond_count_.TimedWait(now + /*1s*/ 1 * 1000 * 1000); |
162 | 0 | if (env_->NowMicros() - start > /*10s*/ 10 * 1000 * 1000) { |
163 | 0 | return false; |
164 | 0 | } |
165 | 0 | if (count_ < count) { |
166 | 0 | GTEST_LOG_(WARNING) << "WaitFor is taking more time than usual"; |
167 | 0 | } |
168 | 0 | } |
169 | 0 |
|
170 | 0 | return true; |
171 | 0 | } |
172 | | |
173 | | void Reset() { |
174 | | MutexLock l(&mu_); |
175 | | count_ = 0; |
176 | | cond_count_.SignalAll(); |
177 | | } |
178 | | |
179 | | private: |
180 | | Env* env_; |
181 | | port::Mutex mu_; |
182 | | port::CondVar cond_count_; |
183 | | int count_; |
184 | | }; |
185 | | |
// Per-test tweaks layered on top of the option configuration that the test
// harness selects (see DBHolder::CurrentOptions).
struct OptionsOverride {
  // Replacement table filter policy (e.g. a bloom filter); nullptr keeps the
  // selected configuration's default.
  std::shared_ptr<const FilterPolicy> filter_policy = nullptr;

  // Used as a bit mask of individual enums in which to skip an XF test point
  int skip_policy = 0;
};
192 | | |
193 | | } // namespace anon |
194 | | |
195 | | // A hacky skip list mem table that triggers flush after number of entries. |
196 | | class SpecialMemTableRep : public MemTableRep { |
197 | | public: |
198 | | explicit SpecialMemTableRep(MemTableAllocator* allocator, |
199 | | MemTableRep* memtable, int num_entries_flush) |
200 | | : MemTableRep(allocator), |
201 | | memtable_(memtable), |
202 | | num_entries_flush_(num_entries_flush), |
203 | | num_entries_(0) {} |
204 | | |
205 | | virtual KeyHandle Allocate(const size_t len, char** buf) override { |
206 | | return memtable_->Allocate(len, buf); |
207 | | } |
208 | | |
209 | | // Insert key into the list. |
210 | | // REQUIRES: nothing that compares equal to key is currently in the list. |
211 | | virtual void Insert(KeyHandle handle) override { |
212 | | memtable_->Insert(handle); |
213 | | num_entries_++; |
214 | | } |
215 | | |
216 | | // Returns true iff an entry that compares equal to key is in the list. |
217 | 0 | virtual bool Contains(const char* key) const override { |
218 | 0 | return memtable_->Contains(key); |
219 | 0 | } |
220 | | |
221 | | virtual size_t ApproximateMemoryUsage() override { |
222 | | // Return a high memory usage when number of entries exceeds the threshold |
223 | | // to trigger a flush. |
224 | | return (num_entries_ < num_entries_flush_) ? 0 : 1024 * 1024 * 1024; |
225 | | } |
226 | | |
227 | | virtual void Get(const LookupKey& k, void* callback_args, |
228 | | bool (*callback_func)(void* arg, |
229 | | const char* entry)) override { |
230 | | memtable_->Get(k, callback_args, callback_func); |
231 | | } |
232 | | |
233 | | uint64_t ApproximateNumEntries(const Slice& start_ikey, |
234 | 0 | const Slice& end_ikey) override { |
235 | 0 | return memtable_->ApproximateNumEntries(start_ikey, end_ikey); |
236 | 0 | } |
237 | | |
238 | | virtual MemTableRep::Iterator* GetIterator(Arena* arena = nullptr) override { |
239 | | return memtable_->GetIterator(arena); |
240 | | } |
241 | | |
242 | | virtual ~SpecialMemTableRep() override {} |
243 | | |
244 | | private: |
245 | | unique_ptr<MemTableRep> memtable_; |
246 | | int num_entries_flush_; |
247 | | int num_entries_; |
248 | | }; |
249 | | |
250 | | // The factory for the hacky skip list mem table that triggers flush after |
251 | | // number of entries exceeds a threshold. |
252 | | class SpecialSkipListFactory : public MemTableRepFactory { |
253 | | public: |
254 | | // After number of inserts exceeds `num_entries_flush` in a mem table, trigger |
255 | | // flush. |
256 | | explicit SpecialSkipListFactory(int num_entries_flush) |
257 | | : num_entries_flush_(num_entries_flush) {} |
258 | | |
259 | | virtual MemTableRep* CreateMemTableRep( |
260 | | const MemTableRep::KeyComparator& compare, MemTableAllocator* allocator, |
261 | | const SliceTransform* transform, Logger* logger) override { |
262 | | return new SpecialMemTableRep( |
263 | | allocator, factory_.CreateMemTableRep(compare, allocator, transform, 0), |
264 | | num_entries_flush_); |
265 | | } |
266 | | virtual const char* Name() const override { return "SkipListFactory"; } |
267 | | |
268 | | private: |
269 | | SkipListFactory factory_; |
270 | | int num_entries_flush_; |
271 | | }; |
272 | | |
// Special Env used to delay background operations
//
// Wraps a real Env and interposes on file creation so tests can inject
// faults (write/sync errors, no-space, dropped writes), slow operations
// down, and count what the DB does. The flag/counter members at the bottom
// are not initialized in-class; the out-of-line SpecialEnv(Env*) constructor
// (defined elsewhere) is presumably responsible for initializing them —
// TODO(review): confirm against the .cc file.
class SpecialEnv : public EnvWrapper {
 public:
  explicit SpecialEnv(Env* base);

  // Routes newly created writable files through one of three fault-injecting
  // wrappers, chosen by substring match on the file name:
  //   ".sst" -> SSTableFile, "MANIFEST" -> ManifestFile, "log" -> WalFile;
  // anything else is returned unwrapped. Before delegating, may fail the
  // open randomly (non_writeable_rate_) or a fixed number of times
  // (non_writable_count_).
  Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r,
                         const EnvOptions& soptions) override {
    // Wrapper for SST files: supports write callbacks, dropped writes,
    // simulated ENOSPC, byte counting, and blocking Sync().
    class SSTableFile : public WritableFile {
     private:
      SpecialEnv* env_;
      unique_ptr<WritableFile> base_;

     public:
      SSTableFile(SpecialEnv* env, unique_ptr<WritableFile>&& base)
          : env_(env), base_(std::move(base)) {}
      Status Append(const Slice& data) override {
        // Give the test a hook before every append (e.g. to flip flags).
        if (env_->table_write_callback_) {
          (*env_->table_write_callback_)();
        }
        if (env_->drop_writes_.load(std::memory_order_acquire)) {
          // Drop writes on the floor
          return Status::OK();
        } else if (env_->no_space_.load(std::memory_order_acquire)) {
          return STATUS(IOError, "No space left on device");
        } else {
          env_->bytes_written_ += data.size();
          return base_->Append(data);
        }
      }
      Status Truncate(uint64_t size) override { return base_->Truncate(size); }
      Status Close() override {
        // SyncPoint is not supported in Released Windows Mode.
#if !(defined NDEBUG) || !defined(OS_WIN)
        // Check preallocation size
        // preallocation size is never passed to base file.
        size_t preallocation_size = preallocation_block_size();
        TEST_SYNC_POINT_CALLBACK("DBTestWritableFile.GetPreallocationStatus",
                                 &preallocation_size);
#endif  // !(defined NDEBUG) || !defined(OS_WIN)
        return base_->Close();
      }
      Status Flush() override { return base_->Flush(); }
      Status Sync() override {
        ++env_->sync_counter_;
        // Spin (in 100ms steps) for as long as the test holds the
        // delay_sstable_sync_ flag, to simulate slow fsync.
        while (env_->delay_sstable_sync_.load(std::memory_order_acquire)) {
          env_->SleepForMicroseconds(100000);
        }
        return base_->Sync();
      }
      void SetIOPriority(Env::IOPriority pri) override {
        base_->SetIOPriority(pri);
      }
      Env::IOPriority GetIOPriority() override {
        return base_->GetIOPriority();
      }
    };
    // Wrapper for MANIFEST files: can fail Append() and Sync() on demand.
    class ManifestFile : public WritableFile {
     public:
      ManifestFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
          : env_(env), base_(std::move(b)) {}
      Status Append(const Slice& data) override {
        if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
          return STATUS(IOError, "simulated writer error");
        } else {
          return base_->Append(data);
        }
      }
      Status Truncate(uint64_t size) override { return base_->Truncate(size); }
      Status Close() override { return base_->Close(); }
      Status Flush() override { return base_->Flush(); }
      Status Sync() override {
        ++env_->sync_counter_;
        if (env_->manifest_sync_error_.load(std::memory_order_acquire)) {
          return STATUS(IOError, "simulated sync error");
        } else {
          return base_->Sync();
        }
      }
      uint64_t GetFileSize() override { return base_->GetFileSize(); }

     private:
      SpecialEnv* env_;
      unique_ptr<WritableFile> base_;
    };
    // Wrapper for WAL files: can fail or slow down Append(), and exposes a
    // test-controlled IsSyncThreadSafe().
    class WalFile : public WritableFile {
     public:
      WalFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
          : env_(env), base_(std::move(b)) {}
      Status Append(const Slice& data) override {
#if !(defined NDEBUG) || !defined(OS_WIN)
        TEST_SYNC_POINT("SpecialEnv::WalFile::Append:1");
#endif
        Status s;
        if (env_->log_write_error_.load(std::memory_order_acquire)) {
          s = STATUS(IOError, "simulated writer error");
        } else {
          // Optionally delay each WAL write by log_write_slowdown_ micros.
          int slowdown =
              env_->log_write_slowdown_.load(std::memory_order_acquire);
          if (slowdown > 0) {
            env_->SleepForMicroseconds(slowdown);
          }
          s = base_->Append(data);
        }
#if !(defined NDEBUG) || !defined(OS_WIN)
        TEST_SYNC_POINT("SpecialEnv::WalFile::Append:2");
#endif
        return s;
      }
      Status Truncate(uint64_t size) override { return base_->Truncate(size); }
      Status Close() override { return base_->Close(); }
      Status Flush() override { return base_->Flush(); }
      Status Sync() override {
        ++env_->sync_counter_;
        return base_->Sync();
      }
      bool IsSyncThreadSafe() const override {
        return env_->is_wal_sync_thread_safe_.load();
      }

     private:
      SpecialEnv* env_;
      unique_ptr<WritableFile> base_;
    };

    // Randomly refuse to open the file with probability
    // non_writeable_rate_ percent.
    if (non_writeable_rate_.load(std::memory_order_acquire) > 0) {
      uint32_t random_number;
      {
        MutexLock l(&rnd_mutex_);
        random_number = rnd_.Uniform(100);
      }
      if (random_number < non_writeable_rate_.load()) {
        return STATUS(IOError, "simulated random write error");
      }
    }

    new_writable_count_++;

    // Deterministically fail the next non_writable_count_ opens.
    if (non_writable_count_.load() > 0) {
      non_writable_count_--;
      return STATUS(IOError, "simulated write error");
    }

    Status s = target()->NewWritableFile(f, r, soptions);
    if (s.ok()) {
      // Order matters: ".sst" is checked first so an SST file is never
      // misclassified by the later, looser "log" substring match.
      if (strstr(f.c_str(), ".sst") != nullptr) {
        r->reset(new SSTableFile(this, std::move(*r)));
      } else if (strstr(f.c_str(), "MANIFEST") != nullptr) {
        r->reset(new ManifestFile(this, std::move(*r)));
      } else if (strstr(f.c_str(), "log") != nullptr) {
        r->reset(new WalFile(this, std::move(*r)));
      }
    }
    return s;
  }

  // Counts every random-access open; if count_random_reads_ is set, also
  // wraps the file so each Read() bumps random_read_counter_.
  Status NewRandomAccessFile(const std::string& f,
                             unique_ptr<RandomAccessFile>* r,
                             const EnvOptions& soptions) override {
    class CountingFile : public yb::RandomAccessFileWrapper {
     public:
      CountingFile(std::unique_ptr<RandomAccessFile>&& target,
                   anon::AtomicCounter* counter)
          : RandomAccessFileWrapper(std::move(target)), counter_(counter) {}

      Status Read(uint64_t offset, size_t n, Slice* result, uint8_t* scratch) const override {
        counter_->Increment();
        return RandomAccessFileWrapper::Read(offset, n, result, scratch);
      }

     private:
      anon::AtomicCounter* counter_;
    };

    Status s = target()->NewRandomAccessFile(f, r, soptions);
    random_file_open_counter_++;
    if (s.ok() && count_random_reads_) {
      r->reset(new CountingFile(std::move(*r), &random_read_counter_));
    }
    return s;
  }

  // Same idea as NewRandomAccessFile, but for sequential reads, gated on
  // count_sequential_reads_ and feeding sequential_read_counter_.
  Status NewSequentialFile(const std::string& f, unique_ptr<SequentialFile>* r,
                           const EnvOptions& soptions) override {
    class CountingFile : public yb::SequentialFileWrapper {
     public:
      CountingFile(std::unique_ptr<SequentialFile>&& target,
                   anon::AtomicCounter* counter)
          : yb::SequentialFileWrapper(std::move(target)), counter_(counter) {}

      Status Read(size_t n, Slice* result, uint8_t* scratch) override {
        counter_->Increment();
        return SequentialFileWrapper::Read(n, result, scratch);
      }

     private:
      anon::AtomicCounter* counter_;
    };

    Status s = target()->NewSequentialFile(f, r, soptions);
    if (s.ok() && count_sequential_reads_) {
      r->reset(new CountingFile(std::move(*r), &sequential_read_counter_));
    }
    return s;
  }

  // Counts every sleep. In no_sleep_/time_elapse_only_sleep_ modes the sleep
  // is (also) converted into virtual time by advancing addon_time_ instead
  // of (or in addition to) really sleeping.
  virtual void SleepForMicroseconds(int micros) override {
    sleep_counter_.Increment();
    if (no_sleep_ || time_elapse_only_sleep_) {
      addon_time_.fetch_add(micros);
    }
    if (!no_sleep_) {
      target()->SleepForMicroseconds(micros);
    }
  }

  // Real time (unless time_elapse_only_sleep_) plus the virtual-time offset.
  virtual Status GetCurrentTime(int64_t* unix_time) override {
    Status s;
    if (!time_elapse_only_sleep_) {
      s = target()->GetCurrentTime(unix_time);
    }
    if (s.ok()) {
      *unix_time += addon_time_.load();
    }
    return s;
  }

  virtual uint64_t NowNanos() override {
    // addon_time_ is in microseconds, hence the * 1000.
    return (time_elapse_only_sleep_ ? 0 : target()->NowNanos()) +
           addon_time_.load() * 1000;
  }

  virtual uint64_t NowMicros() override {
    return (time_elapse_only_sleep_ ? 0 : target()->NowMicros()) +
           addon_time_.load();
  }

  Random rnd_;
  port::Mutex rnd_mutex_;  // Lock to protect rnd_

  // sstable Sync() calls are blocked while this flag is true.
  std::atomic<bool> delay_sstable_sync_;

  // Drop writes on the floor while this flag is true.
  std::atomic<bool> drop_writes_;

  // Simulate no-space errors while this flag is true.
  std::atomic<bool> no_space_;

  // Simulate non-writable file system while this flag is true.
  std::atomic<bool> non_writable_;

  // Force sync of manifest files to fail while this flag is true.
  std::atomic<bool> manifest_sync_error_;

  // Force write to manifest files to fail while this flag is true.
  std::atomic<bool> manifest_write_error_;

  // Force write to log files to fail while this flag is true.
  std::atomic<bool> log_write_error_;

  // Slow down every log write, in micro-seconds.
  std::atomic<int> log_write_slowdown_;

  // NOTE(review): the plain (non-atomic) bools below are read from the Env
  // callbacks; presumably they are only set while the DB is quiescent —
  // confirm before toggling them from a concurrent test thread.
  bool count_random_reads_;
  anon::AtomicCounter random_read_counter_;
  std::atomic<int> random_file_open_counter_;

  bool count_sequential_reads_;
  anon::AtomicCounter sequential_read_counter_;

  anon::AtomicCounter sleep_counter_;

  std::atomic<int64_t> bytes_written_;

  std::atomic<int> sync_counter_;

  std::atomic<uint32_t> non_writeable_rate_;

  std::atomic<uint32_t> new_writable_count_;

  std::atomic<uint32_t> non_writable_count_;

  // Invoked (if non-null) at the start of every SST Append(); not owned.
  std::function<void()>* table_write_callback_;

  // Virtual-time offset, in microseconds, added to all clock reads.
  std::atomic<int64_t> addon_time_;

  // When true, clocks report only the accumulated addon_time_.
  bool time_elapse_only_sleep_;

  // When true, SleepForMicroseconds() advances virtual time instead of sleeping.
  bool no_sleep_;

  std::atomic<bool> is_wal_sync_thread_safe_{true};
};
565 | | |
// Shared state and helper operations for RocksDB tests: owns the DB handle,
// its (special/mock/encrypted) environments, and utilities to destroy and
// (re)open the database under many different option configurations.
class DBHolder {
 protected:
  // Sequence of option configurations to try
  enum OptionConfig {
    kDefault = 0,
    kBlockBasedTableWithPrefixHashIndex = 1,
    kBlockBasedTableWithWholeKeyHashIndex = 2,
    kPlainTableFirstBytePrefix = 3,
    kPlainTableCappedPrefix = 4,
    kPlainTableCappedPrefixNonMmap = 5,
    kPlainTableAllBytesPrefix = 6,
    kVectorRep = 7,
    kHashLinkList = 8,
    kMergePut = 9,
    kFilter = 10,
    kFullFilterWithNewTableReaderForCompactions = 11,
    kUncompressed = 12,
    kNumLevel_3 = 13,
    kDBLogDir = 14,
    kWalDirAndMmapReads = 15,
    kManifestFileSize = 16,
    kPerfOptions = 17,
    kDeletesFilterFirst = 18,
    kHashSkipList = 19,
    kUniversalCompaction = 20,
    kUniversalCompactionMultiLevel = 21,
    kCompressedBlockCache = 22,
    kInfiniteMaxOpenFiles = 23,
    kxxHashChecksum = 24,
    kFIFOCompaction = 25,
    kOptimizeFiltersForHits = 26,
    kRowCache = 27,
    kRecycleLogFiles = 28,
    kConcurrentSkipList = 29,
    // NOTE(review): kEnd aliases kLevelSubcompactions (both 30), so the
    // configurations at and after kLevelSubcompactions are not visited by
    // the ChangeOptions() iteration loop — presumably they are selected
    // explicitly by individual tests; confirm before renumbering.
    kEnd = 30,
    kLevelSubcompactions = 30,
    kUniversalSubcompactions = 31,
    kBlockBasedTableWithIndexRestartInterval = 32,
    kBlockBasedTableWithThreeSharedPartsKeyDeltaEncoding = 33,
  };
  // Index (OptionConfig) of the configuration currently in use.
  int option_config_;

 public:
  std::string dbname_;                  // path of the database under test
  std::string alternative_wal_dir_;     // non-default WAL dir, if configured
  std::string alternative_db_log_dir_;  // non-default info-log dir, if configured
  MockEnv* mem_env_;                    // in-memory env (may be null)
  SpecialEnv* env_;                     // fault-injecting env wrapper
  DB* db_;                              // the open database (may be null)
  std::vector<ColumnFamilyHandle*> handles_;  // open column family handles

  // Options most recently used to (re)open the DB.
  Options last_options_;

  // For encryption
  std::unique_ptr<yb::encryption::UniverseKeyManager> universe_key_manager_;
  std::unique_ptr<rocksdb::Env> encrypted_env_;

  static const std::string kKeyId;
  static const std::string kKeyFile;

  // Skip some options, as they may not be applicable to a specific test.
  // To add more skip constants, use values 4, 8, 16, etc.
  enum OptionSkip {
    kNoSkip = 0,
    kSkipDeletesFilterFirst = 1,
    kSkipUniversalCompaction = 2,
    kSkipMergePut = 4,
    kSkipPlainTable = 8,
    kSkipHashIndex = 16,
    kSkipNoSeekToLast = 32,
    kSkipFIFOCompaction = 64,
    kSkipMmapReads = 128,
  };

  explicit DBHolder(std::string path, bool encryption_enabled = false);

  virtual ~DBHolder();

  void CreateEncryptedEnv();

  // Formats test key number |i| as a fixed-width string ("key000042") so
  // lexicographic key order matches numeric order.
  static std::string Key(int i) {
    char buf[100];
    snprintf(buf, sizeof(buf), "key%06d", i);
    return std::string(buf);
  }

  static bool ShouldSkipOptions(int option_config, int skip_mask = kNoSkip);

  // Switch to a fresh database with the next option configuration to
  // test. Return false if there are no more configurations to test.
  bool ChangeOptions(int skip_mask = kNoSkip);

  // Switch between different compaction styles (we have only 2 now).
  bool ChangeCompactOptions();

  // Switch between different filter policy
  // Jump from kDefault to kFilter to kFullFilter
  bool ChangeFilterOptions();

  // Return the current option configuration.
  Options CurrentOptions(
      const anon::OptionsOverride& options_override = anon::OptionsOverride());

  Options CurrentOptions(
      const Options& defaultOptions,
      const anon::OptionsOverride& options_override = anon::OptionsOverride());

  // Downcasts db_ to the implementation class for test-only internal APIs.
  DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }

  void CreateColumnFamilies(const std::vector<std::string>& cfs,
                            const Options& options);

  void CreateAndReopenWithCF(const std::vector<std::string>& cfs,
                             const Options& options);

  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                const std::vector<Options>& options);

  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                const Options& options);

  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                     const std::vector<Options>& options);

  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                     const Options& options);

  void Reopen(const Options& options);

  void Close();

  void DestroyAndReopen(const Options& options);

  void Destroy(const Options& options);

  Status ReadOnlyReopen(const Options& options);

  Status TryReopen(const Options& options);

  Status Flush(int cf = 0);

  Status Put(const Slice& k, const Slice& v, WriteOptions wo = WriteOptions());

  Status Put(int cf, const Slice& k, const Slice& v,
             WriteOptions wo = WriteOptions());

  Status Delete(const std::string& k);

  Status Delete(int cf, const std::string& k);

  Status SingleDelete(const std::string& k);

  Status SingleDelete(int cf, const std::string& k);

  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr);

  std::string Get(int cf, const std::string& k,
                  const Snapshot* snapshot = nullptr);

  uint64_t GetNumSnapshots();

  uint64_t GetTimeOldestSnapshots();

  // Return a string that contains all key,value pairs in order,
  // formatted like "(k1->v1)(k2->v2)".
  std::string Contents(int cf = 0);

  std::string AllEntriesFor(const Slice& user_key, int cf = 0);

#ifndef ROCKSDB_LITE
  int NumSortedRuns(int cf = 0);

  uint64_t TotalSize(int cf = 0);

  uint64_t SizeAtLevel(int level);

  size_t TotalLiveFiles(int cf = 0);

  size_t CountLiveFiles();
#endif  // ROCKSDB_LITE

  int NumTableFilesAtLevel(int level, int cf = 0);

  int TotalTableFiles(int cf = 0, int levels = -1);

  // Return spread of files per level
  std::string FilesPerLevel(int cf = 0);

  size_t CountFiles();

  uint64_t Size(const Slice& start, const Slice& limit, int cf = 0);

  void Compact(int cf, const Slice& start, const Slice& limit,
               uint32_t target_path_id);

  void Compact(int cf, const Slice& start, const Slice& limit);

  void Compact(const Slice& start, const Slice& limit);

  // Do n memtable compactions, each of which produces an sstable
  // covering the range [small,large].
  void MakeTables(int n, const std::string& small, const std::string& large,
                  int cf = 0);

  // Prevent pushing of new sstables into deeper levels by adding
  // tables that cover a specified range to all levels.
  void FillLevels(const std::string& smallest, const std::string& largest,
                  int cf);

  void MoveFilesToLevel(int level, int cf = 0);

  void DumpFileCounts(const char* label);

  std::string DumpSSTableList();

  void GetSstFiles(std::string path, std::vector<std::string>* files);

  int GetSstFileCount(std::string path);

  // this will generate non-overlapping files since it keeps increasing key_idx
  void GenerateNewFile(Random* rnd, int* key_idx, bool nowait = false);

  void GenerateNewFile(int fd, Random* rnd, int* key_idx, bool nowait = false);

  static const int kNumKeysByGenerateNewRandomFile;
  static const int KNumKeysByGenerateNewFile = 100;

  void GenerateNewRandomFile(Random* rnd, bool nowait = false);

  std::string IterStatus(Iterator* iter);

  Options OptionsForLogIterTest();

  std::string DummyString(size_t len, char c = 'a');

  void VerifyIterLast(std::string expected_key, int cf = 0);

  // Used to test InplaceUpdate

  // If previous value is nullptr or delta is > than previous value,
  // sets newValue with delta
  // If previous value is not empty,
  // updates previous value with 'b' string of previous value size - 1.
  static UpdateStatus updateInPlaceSmallerSize(char* prevValue,
                                               uint32_t* prevSize, Slice delta,
                                               std::string* newValue);

  static UpdateStatus updateInPlaceSmallerVarintSize(char* prevValue,
                                                     uint32_t* prevSize,
                                                     Slice delta,
                                                     std::string* newValue);

  static UpdateStatus updateInPlaceLargerSize(char* prevValue,
                                              uint32_t* prevSize, Slice delta,
                                              std::string* newValue);

  static UpdateStatus updateInPlaceNoAction(char* prevValue, uint32_t* prevSize,
                                            Slice delta, std::string* newValue);

  // Utility method to test InplaceUpdate
  void validateNumberOfEntries(int numValues, int cf = 0);

  void CopyFile(const std::string& source, const std::string& destination,
                uint64_t size = 0);

  std::unordered_map<std::string, uint64_t> GetAllSSTFiles(
      uint64_t* total_size = nullptr);
};
834 | | |
// Google-test fixture glue: combines the RocksDBTest base class with the
// DBHolder helpers so individual test cases inherit both.
class DBTestBase : public RocksDBTest, public DBHolder {
 public:
  explicit DBTestBase(std::string path, bool encryption_enabled = false)
      : DBHolder(std::move(path), encryption_enabled)
  {}
};
841 | | |
842 | | } // namespace rocksdb |
843 | | |
844 | | #endif // YB_ROCKSDB_DB_DB_TEST_UTIL_H |