YugabyteDB (2.13.1.0-b60, 21121d69985fbf76aa6958d8f04a9bfa936293b5)

Coverage Report

Created: 2022-03-22 16:43

/Users/deen/code/yugabyte-db/src/yb/rocksdb/util/cache.cc
Line
Count
Source
1
//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
2
//  This source code is licensed under the BSD-style license found in the
3
//  LICENSE file in the root directory of this source tree. An additional grant
4
//  of patent rights can be found in the PATENTS file in the same directory.
5
//
6
// The following only applies to changes made to this file as part of YugaByte development.
7
//
8
// Portions Copyright (c) YugaByte, Inc.
9
//
10
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
11
// in compliance with the License.  You may obtain a copy of the License at
12
//
13
// http://www.apache.org/licenses/LICENSE-2.0
14
//
15
// Unless required by applicable law or agreed to in writing, software distributed under the License
16
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
17
// or implied.  See the License for the specific language governing permissions and limitations
18
// under the License.
19
//
20
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
21
// Use of this source code is governed by a BSD-style license that can be
22
// found in the LICENSE file. See the AUTHORS file for names of contributors.
23
24
#include <assert.h>
25
#include <stdio.h>
26
27
#include "yb/rocksdb/cache.h"
28
#include "yb/rocksdb/port/port.h"
29
#include "yb/rocksdb/statistics.h"
30
#include "yb/rocksdb/util/autovector.h"
31
#include "yb/rocksdb/util/hash.h"
32
#include "yb/rocksdb/util/mutexlock.h"
33
#include "yb/rocksdb/util/statistics.h"
34
35
#include "yb/util/cache_metrics.h"
36
#include "yb/util/enums.h"
37
#include "yb/util/metrics.h"
38
#include "yb/util/random_util.h"
39
40
// A value of 0 means that there is no single-touch cache, and
41
// 1 means that the entire cache is treated as a single-touch cache.
42
DEFINE_double(cache_single_touch_ratio, 0.2,
43
              "Fraction of the cache dedicated to single-touch items");
44
45
DEFINE_bool(cache_overflow_single_touch, true,
46
            "Whether to enable overflow of single touch cache into the multi touch cache "
47
            "allocation");
48
49
namespace rocksdb {
50
51
6.56M
Cache::~Cache() {
52
6.56M
}
53
54
namespace {
55
56
// LRU cache implementation
57
58
// An entry is a variable-length heap-allocated structure.
59
// Entries are referenced by the cache and/or by any external entity.
60
// The cache keeps all its entries in a hash table. Some elements
61
// are also stored on an LRU list.
62
//
63
// LRUHandle can be in these states:
64
// 1. Referenced externally AND in hash table.
65
//  In that case the entry is *not* in the LRU. (refs > 1 && in_cache == true)
66
// 2. Not referenced externally and in hash table. In that case the entry is
67
// in the LRU and can be freed. (refs == 1 && in_cache == true)
68
// 3. Referenced externally and not in hash table. In that case the entry is
69
// not on the LRU list and not in the table. (refs >= 1 && in_cache == false)
70
//
71
// All newly created LRUHandles are in state 1. If you call LRUCache::Release
72
// on an entry in state 1, it will go into state 2. To move from state 1 to
73
// state 3, either call LRUCache::Erase or LRUCache::Insert with the same key.
74
// To move from state 2 to state 1, use LRUCache::Lookup.
75
// Before destruction, make sure that no handles are in state 1. This means
76
// that any successful LRUCache::Lookup/LRUCache::Insert has a matching
77
// LRUCache::Release (to move into state 2) or LRUCache::Erase (for state 3).
78
//
79
// The LRU also supports scan-resistant access by splitting into two sub-caches.
80
// query_id is used to detect repeated touches from the same query, so that they do not
81
// upgrade the cache element from the single-touch LRU into the multi-touch LRU.
82
// query_id == kInMultiTouchId means that the handle is in the multi
83
// touch cache, which means that this item will be evicted only to make room for values
84
// that are accessed multiple times by different queries.
85
// query_id == kNoCacheQueryId means that this Handle is not going to be added
86
// into the cache.
87
88
struct LRUHandle {
89
  void* value;
90
  void (*deleter)(const Slice&, void* value);
91
  LRUHandle* next_hash;
92
  LRUHandle* next;
93
  LRUHandle* prev;
94
  size_t charge;      // TODO(opt): Only allow uint32_t?
95
  size_t key_length;
96
  uint32_t refs;      // number of refs to this entry;
97
                      // the cache itself is counted as 1
98
  bool in_cache;      // true, if this entry is referenced by the hash table
99
  uint32_t hash;      // Hash of key(); used for fast sharding and comparisons
100
  QueryId query_id;  // Query id that added the value to the cache.
101
  char key_data[1];   // Beginning of key
102
103
112M
  Slice key() const {
104
    // For cheaper lookups, we allow a temporary Handle object
105
    // to store a pointer to a key in "value".
106
112M
    if (next == this) {
107
0
      return *(reinterpret_cast<Slice*>(value));
108
112M
    } else {
109
112M
      return Slice(key_data, key_length);
110
112M
    }
111
112M
  }
112
113
2.42M
  void Free(yb::CacheMetrics* metrics) {
114
2.42M
    assert((refs == 1 && in_cache) || (refs == 0 && !in_cache));
115
0
    (*deleter)(key(), value);
116
2.42M
    if (metrics != nullptr) {
117
657
      if (GetSubCacheType() == MULTI_TOUCH) {
118
179
        metrics->multi_touch_cache_usage->DecrementBy(charge);
119
481
      } else if (GetSubCacheType() == SINGLE_TOUCH) {
120
481
        metrics->single_touch_cache_usage->DecrementBy(charge);
121
481
      }
122
657
      metrics->cache_usage->DecrementBy(charge);
123
657
    }
124
2.42M
    delete[] reinterpret_cast<char*>(this);
125
2.42M
  }
126
127
568M
  SubCacheType GetSubCacheType() const {
128
568M
    return (query_id == kInMultiTouchId) ? MULTI_TOUCH : SINGLE_TOUCH;
129
568M
  }
130
};
131
132
// We provide our own simple hash table since it removes a whole bunch
133
// of porting hacks and is also faster than some of the built-in hash
134
// table implementations in some of the compiler/runtime combinations
135
// we have tested.  E.g., readrandom speeds up by ~5% over the g++
136
// 4.4.3's builtin hashtable.
137
class HandleTable {
138
 public:
139
  HandleTable() :
140
128M
      length_(0), elems_(0), list_(nullptr), metrics_(nullptr) { Resize(); }
141
142
  template <typename T>
143
126M
  void ApplyToAllCacheEntries(T func) {
144
2.15G
    for (uint32_t i = 0; i < length_; i++) {
145
2.02G
      LRUHandle* h = list_[i];
146
2.02G
      while (h != nullptr) {
147
523k
        auto n = h->next_hash;
148
523k
        assert(h->in_cache);
149
0
        func(h);
150
523k
        h = n;
151
523k
      }
152
2.02G
    }
153
126M
  }
cache.cc:void rocksdb::(anonymous namespace)::HandleTable::ApplyToAllCacheEntries<rocksdb::(anonymous namespace)::HandleTable::~HandleTable()::'lambda'(rocksdb::(anonymous namespace)::LRUHandle*)>(rocksdb::(anonymous namespace)::HandleTable::~HandleTable()::'lambda'(rocksdb::(anonymous namespace)::LRUHandle*))
Line
Count
Source
143
126M
  void ApplyToAllCacheEntries(T func) {
144
2.15G
    for (uint32_t i = 0; i < length_; i++) {
145
2.02G
      LRUHandle* h = list_[i];
146
2.02G
      while (h != nullptr) {
147
523k
        auto n = h->next_hash;
148
523k
        assert(h->in_cache);
149
0
        func(h);
150
523k
        h = n;
151
523k
      }
152
2.02G
    }
153
126M
  }
cache.cc:void rocksdb::(anonymous namespace)::HandleTable::ApplyToAllCacheEntries<rocksdb::(anonymous namespace)::LRUCache::ApplyToAllCacheEntries(void (*)(void*, unsigned long), bool)::$_0>(rocksdb::(anonymous namespace)::LRUCache::ApplyToAllCacheEntries(void (*)(void*, unsigned long), bool)::$_0)
Line
Count
Source
143
16
  void ApplyToAllCacheEntries(T func) {
144
272
    for (uint32_t i = 0; i < length_; i++) {
145
256
      LRUHandle* h = list_[i];
146
266
      while (h != nullptr) {
147
10
        auto n = h->next_hash;
148
10
        assert(h->in_cache);
149
0
        func(h);
150
10
        h = n;
151
10
      }
152
256
    }
153
16
  }
154
155
126M
  ~HandleTable() {
156
126M
    ApplyToAllCacheEntries([this](LRUHandle* h) {
157
523k
      if (h->refs == 1) {
158
523k
        h->Free(metrics_.get());
159
523k
      }
160
523k
    });
161
126M
    delete[] list_;
162
126M
  }
163
164
107M
  LRUHandle* Lookup(const Slice& key, uint32_t hash) const {
165
107M
    return *FindPointer(key, hash);
166
107M
  }
167
168
275k
  void SetMetrics(shared_ptr<yb::CacheMetrics> metrics) { metrics_ = metrics; }
169
170
  // Checks if the newly created handle is a candidate to be inserted into the multi touch cache.
171
  // It checks whether the same value is already in the multi touch cache, or, if it is in the
172
  // single touch cache, whether the query ids are different.
173
2.89M
  SubCacheType GetSubCacheTypeCandidate(LRUHandle* h) {
174
2.89M
    if (h->GetSubCacheType() == MULTI_TOUCH) {
175
118k
      return MULTI_TOUCH;
176
118k
    }
177
178
2.77M
    LRUHandle* val = Lookup(h->key(), h->hash);
179
2.77M
    if (val != nullptr && (val->GetSubCacheType() == MULTI_TOUCH || val->query_id != h->query_id)) {
180
415
      h->query_id = kInMultiTouchId;
181
415
      return MULTI_TOUCH;
182
415
    }
183
2.77M
    return SINGLE_TOUCH;
184
2.77M
  }
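
A hedged toy distillation of the promotion rule just above (Candidate and its parameters are invented for illustration; the real code consults the hash table and also rewrites h->query_id):

#include <cassert>

enum SubCache { SINGLE_TOUCH, MULTI_TOUCH };

SubCache Candidate(bool existing_entry, bool existing_is_multi,
                   long existing_query_id, long new_query_id) {
  // Promote when the key is already cached and either it is already
  // multi-touch or a different query is touching it now.
  if (existing_entry && (existing_is_multi || existing_query_id != new_query_id))
    return MULTI_TOUCH;
  return SINGLE_TOUCH;
}

int main() {
  assert(Candidate(true, false, 1, 1) == SINGLE_TOUCH);   // same query again
  assert(Candidate(true, false, 1, 2) == MULTI_TOUCH);    // a different query
  assert(Candidate(false, false, 0, 3) == SINGLE_TOUCH);  // first touch ever
  return 0;
}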
185
186
2.89M
  LRUHandle* Insert(LRUHandle* h) {
187
2.89M
    LRUHandle** ptr = FindPointer(h->key(), h->hash);
188
2.89M
    LRUHandle* old = *ptr;
189
2.89M
    h->next_hash = (old == nullptr ? nullptr : old->next_hash);
190
2.89M
    *ptr = h;
191
2.89M
    if (old == nullptr) {
192
2.89M
      ++elems_;
193
2.89M
      if (elems_ > length_) {
194
        // Since each cache entry is fairly large, we aim for a small
195
        // average linked list length (<= 1).
196
20.9k
        Resize();
197
20.9k
      }
198
2.89M
    }
199
2.89M
    return old;
200
2.89M
  }
201
202
1.90M
  LRUHandle* Remove(const Slice& key, uint32_t hash) {
203
1.90M
    LRUHandle** ptr = FindPointer(key, hash);
204
1.90M
    LRUHandle* result = *ptr;
205
1.90M
    if (result != nullptr) {
206
1.90M
      *ptr = result->next_hash;
207
1.90M
      --elems_;
208
1.90M
    }
209
1.90M
    return result;
210
1.90M
  }
211
212
 private:
213
  // The table consists of an array of buckets where each bucket is
214
  // a linked list of cache entries that hash into the bucket.
215
  uint32_t length_;
216
  uint32_t elems_;
217
  LRUHandle** list_;
218
  shared_ptr<yb::CacheMetrics> metrics_;
219
220
  // Return a pointer to the slot that points to a cache entry that
221
  // matches key/hash.  If there is no such cache entry, return a
222
  // pointer to the trailing slot in the corresponding linked list.
223
112M
  LRUHandle** FindPointer(const Slice& key, uint32_t hash) const {
224
112M
    LRUHandle** ptr = &list_[hash & (length_ - 1)];
225
140M
    while (*ptr != nullptr &&
226
140M
           ((*ptr)->hash != hash || key != (*ptr)->key())) {
227
28.3M
      ptr = &(*ptr)->next_hash;
228
28.3M
    }
229
112M
    return ptr;
230
112M
  }
231
232
128M
  void Resize() {
233
128M
    uint32_t new_length = 16;
234
128M
    while (new_length < elems_ * 1.5) {
235
39.1k
      new_length *= 2;
236
39.1k
    }
237
128M
    LRUHandle** new_list = new LRUHandle*[new_length];
238
128M
    memset(new_list, 0, sizeof(new_list[0]) * new_length);
239
128M
    uint32_t count = 0;
240
128M
    LRUHandle* h;
241
128M
    LRUHandle* next;
242
128M
    LRUHandle** ptr;
243
128M
    uint32_t hash;
244
129M
    for (uint32_t i = 0; i < length_; i++) {
245
1.14M
      h = list_[i];
246
2.31M
      while (h != nullptr) {
247
1.16M
        next = h->next_hash;
248
1.16M
        hash = h->hash;
249
1.16M
        ptr = &new_list[hash & (new_length - 1)];
250
1.16M
        h->next_hash = *ptr;
251
1.16M
        *ptr = h;
252
1.16M
        h = next;
253
1.16M
        count++;
254
1.16M
      }
255
1.14M
    }
256
128M
    assert(elems_ == count);
257
0
    delete[] list_;
258
128M
    list_ = new_list;
259
128M
    length_ = new_length;
260
128M
  }
261
};
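
One property the table relies on, shown as a small standalone check: length_ starts at 16 and only ever doubles in Resize(), so it stays a power of two, and the mask in FindPointer, hash & (length_ - 1), is equivalent to hash % length_. The hash constant below is an arbitrary example:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t length = 16;                  // initial table size, as in Resize()
  const uint32_t hash = 0x9e3779b9u;     // arbitrary example hash
  assert((hash & (length - 1)) == hash % length);
  length *= 2;                           // after one doubling Resize()
  assert((hash & (length - 1)) == hash % length);
  return 0;
}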
262
263
// Sub-cache of the LRUCache, used to track its own LRU pointers, capacity, and usage.
264
class LRUSubCache {
265
 public:
266
  LRUSubCache();
267
  ~LRUSubCache();
268
269
  // Accessors.
270
777M
  size_t Usage() const {
271
777M
    return usage_;
272
777M
  }
273
274
204
  size_t LRU_Usage() const {
275
204
    return lru_usage_;
276
204
  }
277
278
487k
  LRUHandle& LRU_Head() {
279
487k
    return lru_;
280
487k
  }
281
282
  // Checks if the head of the LRU linked list is pointing to itself,
283
  // meaning that the LRU list is empty.
284
3.22M
  bool IsLRUEmpty() const {
285
3.22M
    return lru_.next == &lru_;
286
3.22M
  }
287
288
93.1k
  size_t GetPinnedUsage() const {
289
93.1k
    assert(usage_ >= lru_usage_);
290
0
    return usage_ - lru_usage_;
291
93.1k
  }
292
293
2.05M
  void DecrementUsage(const size_t charge) {
294
2.05M
    assert(usage_ >= charge);
295
0
    usage_ -= charge;
296
2.05M
  }
297
298
3.04M
  void IncrementUsage(const size_t charge) {
299
3.04M
    assert(usage_ + charge > 0);
300
0
    usage_ += charge;
301
3.04M
  }
302
303
  void LRU_Remove(LRUHandle* e);
304
  void LRU_Append(LRUHandle *e);
305
306
 private:
307
  // Dummy head of the LRU list.
308
  // lru_.prev is the newest entry, lru_.next is the oldest entry.
309
  // The LRU list contains items which can be evicted, i.e. referenced only by the cache.
310
  LRUHandle lru_;
311
312
  // Memory size for entries residing in the cache.
313
  // Includes entries in the LRU list and entries referenced by callers, which are not eligible for cleanup.
314
  size_t usage_;
315
316
  // Memory size for entries residing only in the LRU list
317
  size_t lru_usage_;
318
};
319
320
256M
LRUSubCache::LRUSubCache() : usage_(0), lru_usage_(0) {
321
  // Make empty circular linked list
322
256M
  lru_.next = &lru_;
323
256M
  lru_.prev = &lru_;
324
256M
}
325
326
253M
LRUSubCache::~LRUSubCache() {}
327
328
// Remove the handle from the LRU list of the sub cache.
329
88.2M
void LRUSubCache::LRU_Remove(LRUHandle* e) {
330
88.2M
  assert(e->next != nullptr);
331
0
  assert(e->prev != nullptr);
332
0
  e->next->prev = e->prev;
333
88.2M
  e->prev->next = e->next;
334
88.2M
  e->prev = e->next = nullptr;
335
88.2M
  lru_usage_ -= e->charge;
336
88.2M
}
337
338
// Append to the LRU head of the sub cache.
339
89.2M
void LRUSubCache::LRU_Append(LRUHandle *e) {
340
89.2M
  assert(e->next == nullptr);
341
0
  assert(e->prev == nullptr);
342
0
  e->next = &lru_;
343
89.2M
  e->prev = lru_.prev;
344
89.2M
  e->prev->next = e;
345
89.2M
  e->next->prev = e;
346
89.2M
  lru_usage_ += e->charge;
347
89.2M
}
348
349
class LRUHandleDeleter {
350
 public:
351
131M
  explicit LRUHandleDeleter(yb::CacheMetrics* metrics) : metrics_(metrics) {}
352
353
488k
  void Add(LRUHandle* handle) {
354
488k
    handles_.push_back(handle);
355
488k
  }
356
357
0
  size_t TotalCharge() const {
358
0
    size_t result = 0;
359
0
    for (LRUHandle* handle : handles_) {
360
0
      result += handle->charge;
361
0
    }
362
0
    return result;
363
0
  }
364
365
131M
  ~LRUHandleDeleter() {
366
131M
    for (LRUHandle* handle : handles_) {
367
488k
      handle->Free(metrics_);
368
488k
    }
369
131M
  }
370
371
 private:
372
  yb::CacheMetrics* metrics_;
373
  autovector<LRUHandle*> handles_;
374
};
375
376
// A single shard of sharded cache.
377
class LRUCache {
378
 public:
379
  LRUCache();
380
  ~LRUCache();
381
382
  // Separate from the constructor so the caller can easily make an array of LRUCache.
383
  // If current usage is more than the new capacity, the function will attempt to
384
  // free the needed space.
385
  void SetCapacity(size_t capacity);
386
387
275k
  void SetMetrics(shared_ptr<yb::CacheMetrics> metrics) {
388
275k
    metrics_ = metrics;
389
275k
    table_.SetMetrics(metrics);
390
275k
  }
391
392
  // Set the flag to reject insertions when the cache is full.
393
  void SetStrictCapacityLimit(bool strict_capacity_limit);
394
395
  // Like Cache methods, but with an extra "hash" parameter.
396
  Status Insert(const Slice& key, uint32_t hash, const QueryId query_id,
397
                void* value, size_t charge, void (*deleter)(const Slice& key, void* value),
398
                Cache::Handle** handle, Statistics* statistics);
399
  Cache::Handle* Lookup(const Slice& key, uint32_t hash, const QueryId query_id,
400
                        Statistics* statistics = nullptr);
401
  void Release(Cache::Handle* handle);
402
  void Erase(const Slice& key, uint32_t hash);
403
  size_t Evict(size_t required);
404
405
  // Although on some platforms the update of size_t is atomic, to make sure
406
  // GetUsage() and GetPinnedUsage() work correctly on any platform, we
407
  // protect them with mutex_.
408
409
26.0k
  size_t GetUsage() const {
410
26.0k
    MutexLock l(&mutex_);
411
26.0k
    return single_touch_sub_cache_.Usage() + multi_touch_sub_cache_.Usage();
412
26.0k
  }
413
414
46.5k
  size_t GetPinnedUsage() const {
415
46.5k
    MutexLock l(&mutex_);
416
46.5k
    return single_touch_sub_cache_.GetPinnedUsage() + multi_touch_sub_cache_.GetPinnedUsage();
417
46.5k
  }
418
419
  void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
420
                              bool thread_safe);
421
422
7
  std::pair<size_t, size_t> TEST_GetIndividualUsages() {
423
7
    return std::pair<size_t, size_t>(
424
7
        single_touch_sub_cache_.Usage(), multi_touch_sub_cache_.Usage());
425
7
  }
426
427
 private:
428
  void LRU_Remove(LRUHandle* e);
429
  void LRU_Append(LRUHandle* e);
430
431
  // Returns the correct SubCache based on the input argument.
432
  LRUSubCache* GetSubCache(const SubCacheType subcache_type);
433
  LRUSubCache single_touch_sub_cache_;
434
  LRUSubCache multi_touch_sub_cache_;
435
436
  size_t total_capacity_;
437
  size_t multi_touch_capacity_;
438
439
  // Just reduce the reference count by 1.
440
  // Returns true if it was the last reference.
441
  bool Unref(LRUHandle* e);
442
443
  // Returns the capacity of the subcache.
444
  // For multi touch cache it is the same as its initial allocation.
445
  // For single touch cache it is the amount of space left in the entire cache.
446
  size_t GetSubCacheCapacity(const SubCacheType subcache_type);
447
448
  // Free some space following strict LRU policy until enough space
449
  // to hold (usage_ + charge) is freed or the lru list is empty.
450
  // This function is not thread safe - it needs to be executed while
451
  // holding the mutex_.
452
  void EvictFromLRU(size_t charge, LRUHandleDeleter* deleted, SubCacheType subcache_type);
453
454
  void DecrementUsage(const SubCacheType subcache_type, const size_t charge);
455
456
  // Checks if the corresponding subcache contains space.
457
  bool HasFreeSpace(const SubCacheType subcache_type);
458
459
169M
  size_t TotalUsage() const {
460
169M
    return single_touch_sub_cache_.Usage() + multi_touch_sub_cache_.Usage();
461
169M
  }
462
463
  // Whether to reject insertion if cache reaches its full capacity.
464
  bool strict_capacity_limit_ = false;
465
466
  // mutex_ protects the following state.
467
  // We don't count mutex_ as the cache's internal state so semantically we
468
  // don't mind mutex_ invoking the non-const actions.
469
  mutable port::Mutex mutex_;
470
471
  HandleTable table_;
472
473
  shared_ptr<yb::CacheMetrics> metrics_;
474
};
475
476
128M
LRUCache::LRUCache() {}
477
478
126M
LRUCache::~LRUCache() {}
479
480
104M
bool LRUCache::Unref(LRUHandle* e) {
481
104M
  assert(e->refs > 0);
482
0
  e->refs--;
483
104M
  return e->refs == 0;
484
104M
}
485
486
543M
LRUSubCache* LRUCache::GetSubCache(const SubCacheType subcache_type) {
487
543M
  if (FLAGS_cache_single_touch_ratio == 0) {
488
902
    return &multi_touch_sub_cache_;
489
543M
  } else if (FLAGS_cache_single_touch_ratio == 1) {
490
902
    return &single_touch_sub_cache_;
491
902
  }
492
543M
  return (subcache_type == SubCacheType::MULTI_TOUCH) ? &multi_touch_sub_cache_
493
543M
                                                       : &single_touch_sub_cache_;
494
543M
}
495
496
49.2k
void LRUCache::DecrementUsage(const SubCacheType subcache_type, const size_t charge) {
497
49.2k
  GetSubCache(subcache_type)->DecrementUsage(charge);
498
49.2k
}
499
500
// Call deleter and free
501
502
void LRUCache::ApplyToAllCacheEntries(void (*callback)(void*, size_t),
503
16
                                      bool thread_safe) {
504
16
  if (thread_safe) {
505
16
    mutex_.Lock();
506
16
  }
507
16
  table_.ApplyToAllCacheEntries([callback](LRUHandle* h) {
508
10
    callback(h->value, h->charge);
509
10
  });
510
16
  if (thread_safe) {
511
16
    mutex_.Unlock();
512
16
  }
513
16
}
514
515
87.8M
void LRUCache::LRU_Remove(LRUHandle* e) {
516
87.8M
  GetSubCache(e->GetSubCacheType())->LRU_Remove(e);
517
87.8M
}
518
519
89.2M
void LRUCache::LRU_Append(LRUHandle* e) {
520
  // Make "e" newest entry by inserting just before lru_
521
89.2M
  GetSubCache(e->GetSubCacheType())->LRU_Append(e);
522
89.2M
}
523
524
260M
size_t LRUCache::GetSubCacheCapacity(const SubCacheType subcache_type) {
525
260M
  switch (subcache_type) {
526
131M
    case SINGLE_TOUCH :
527
131M
      if (strict_capacity_limit_ || !FLAGS_cache_overflow_single_touch) {
528
447
        return total_capacity_ - multi_touch_capacity_;
529
447
      }
530
131M
      return total_capacity_ - multi_touch_sub_cache_.Usage();
531
128M
    case MULTI_TOUCH :
532
128M
      return multi_touch_capacity_;
533
260M
  }
534
0
  FATAL_INVALID_ENUM_VALUE(SubCacheType, subcache_type);
535
0
}
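
A hedged standalone sketch of the rule above (SingleTouchCapacity and its arguments are invented for illustration): with overflow enabled and no strict limit, the single-touch pool may borrow whatever the multi-touch pool is not currently using.

#include <cstddef>
#include <cstdio>

// Toy restatement of LRUCache::GetSubCacheCapacity for the single-touch pool.
size_t SingleTouchCapacity(size_t total, size_t multi_capacity,
                           size_t multi_usage, bool strict, bool overflow) {
  if (strict || !overflow) return total - multi_capacity;
  return total - multi_usage;  // may borrow unused multi-touch space
}

int main() {
  // Example: a 100-unit cache with 80 units reserved for multi-touch, of
  // which only 30 are in use.
  std::printf("%zu\n", SingleTouchCapacity(100, 80, 30, false, true));  // 70
  std::printf("%zu\n", SingleTouchCapacity(100, 80, 30, true, true));   // 20
  return 0;
}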
536
537
void LRUCache::EvictFromLRU(const size_t charge,
538
                            LRUHandleDeleter* deleted,
539
260M
                            const SubCacheType subcache_type) {
540
260M
  LRUSubCache* sub_cache =  GetSubCache(subcache_type);
541
260M
  const size_t capacity = GetSubCacheCapacity(subcache_type);
542
260M
  while (sub_cache->Usage() + charge > capacity && !sub_cache->IsLRUEmpty()) {
543
487k
    LRUHandle* old = sub_cache->LRU_Head().next;
544
487k
    assert(old->in_cache);
545
0
    assert(old->refs == 1);  // LRU list contains elements which may be evicted
546
0
    sub_cache->LRU_Remove(old);
547
487k
    table_.Remove(old->key(), old->hash);
548
487k
    old->in_cache = false;
549
487k
    Unref(old);
550
487k
    sub_cache->DecrementUsage(old->charge);
551
487k
    deleted->Add(old);
552
487k
  }
553
260M
}
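
The eviction loop above, distilled into a runnable toy (the deque of charges is a stand-in for the LRU list; all sizes are example values):

#include <cstddef>
#include <cstdio>
#include <deque>

int main() {
  std::deque<size_t> lru = {10, 20, 30};  // entry charges, oldest first
  size_t usage = 60, capacity = 70, new_charge = 25;
  // Pop from the cold end until the new charge fits or the list is empty,
  // as in LRUCache::EvictFromLRU.
  while (usage + new_charge > capacity && !lru.empty()) {
    usage -= lru.front();                 // evict the oldest entry
    lru.pop_front();
  }
  std::printf("usage after eviction: %zu\n", usage);  // prints 30
  return 0;
}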
554
555
128M
void LRUCache::SetCapacity(size_t capacity) {
556
128M
  LRUHandleDeleter last_reference_list(metrics_.get());
557
558
128M
  {
559
128M
    MutexLock l(&mutex_);
560
128M
    multi_touch_capacity_ = round((1 - FLAGS_cache_single_touch_ratio) * capacity);
561
128M
    total_capacity_ = capacity;
562
128M
    EvictFromLRU(0, &last_reference_list, MULTI_TOUCH);
563
128M
    EvictFromLRU(0, &last_reference_list, SINGLE_TOUCH);
564
128M
  }
565
128M
}
566
567
128M
void LRUCache::SetStrictCapacityLimit(bool strict_capacity_limit) {
568
128M
  MutexLock l(&mutex_);
569
  // Allow setting strict capacity limit only when there are no elements in the cache.
570
  // This is because we disable overflow of the single touch cache when strict_capacity_limit_ is true.
571
  // We cannot ensure that the single touch cache has not already overflowed when the cache already has
572
  // elements in it.
573
128M
  assert(TotalUsage() == 0 || !FLAGS_cache_overflow_single_touch);
574
0
  strict_capacity_limit_ = strict_capacity_limit;
575
128M
}
576
577
Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash, const QueryId query_id,
578
104M
                                Statistics* statistics)  {
579
104M
  MutexLock l(&mutex_);
580
104M
  LRUHandle* e = table_.Lookup(key, hash);
581
104M
  if (e != nullptr) {
582
100M
    assert(e->in_cache);
583
    // Since the entry is now referenced externally, it cannot be evicted, so remove it from the LRU.
584
100M
    if (e->refs == 1) {
585
87.7M
      LRU_Remove(e);
586
87.7M
    }
587
    // Increase the number of references and move to state 1. (in cache and not in LRU)
588
100M
    e->refs++;
589
590
    // Now the handle will be added to the multi touch pool, but only if that pool exists.
591
100M
    if (FLAGS_cache_single_touch_ratio < 1 && e->GetSubCacheType() != MULTI_TOUCH &&
592
100M
        e->query_id != query_id) {
593
151k
      {
594
151k
        LRUHandleDeleter multi_touch_eviction_list(metrics_.get());
595
151k
        EvictFromLRU(e->charge, &multi_touch_eviction_list, MULTI_TOUCH);
596
151k
      }
597
      // Cannot have any single touch elements in this case.
598
151k
      assert(FLAGS_cache_single_touch_ratio != 0);
599
151k
      if (!strict_capacity_limit_ ||
600
151k
          multi_touch_sub_cache_.Usage() - multi_touch_sub_cache_.LRU_Usage() + e->charge <=
601
151k
          multi_touch_capacity_) {
602
151k
        e->query_id = kInMultiTouchId;
603
151k
        single_touch_sub_cache_.DecrementUsage(e->charge);
604
151k
        multi_touch_sub_cache_.IncrementUsage(e->charge);
605
151k
        if (metrics_) {
606
145k
          metrics_->multi_touch_cache_usage->IncrementBy(e->charge);
607
145k
          metrics_->single_touch_cache_usage->DecrementBy(e->charge);
608
145k
        }
609
151k
      }
610
151k
    }
611
100M
    if (statistics != nullptr) {
612
      // overall cache hit
613
62.6M
      RecordTick(statistics, BLOCK_CACHE_HIT);
614
      // total bytes read from cache
615
62.6M
      RecordTick(statistics, BLOCK_CACHE_BYTES_READ, e->charge);
616
62.6M
      if (e->GetSubCacheType() == SubCacheType::SINGLE_TOUCH) {
617
29.8M
        RecordTick(statistics, BLOCK_CACHE_SINGLE_TOUCH_HIT);
618
29.8M
        RecordTick(statistics, BLOCK_CACHE_SINGLE_TOUCH_BYTES_READ, e->charge);
619
32.7M
      } else if (e->GetSubCacheType() == SubCacheType::MULTI_TOUCH) {
620
32.7M
        RecordTick(statistics, BLOCK_CACHE_MULTI_TOUCH_HIT);
621
32.7M
        RecordTick(statistics, BLOCK_CACHE_MULTI_TOUCH_BYTES_READ, e->charge);
622
32.7M
      }
623
62.6M
    }
624
100M
  } else {
625
4.08M
    if (statistics != nullptr) {
626
668k
      RecordTick(statistics, BLOCK_CACHE_MISS);
627
668k
    }
628
4.08M
  }
629
630
104M
  if (metrics_ != nullptr) {
631
54.5M
    metrics_->lookups->Increment();
632
54.5M
    bool was_hit = (e != nullptr);
633
54.5M
    if (was_hit) {
634
53.9M
      metrics_->cache_hits->Increment();
635
53.9M
    } else {
636
665k
      metrics_->cache_misses->Increment();
637
665k
    }
638
54.5M
  }
639
104M
  return reinterpret_cast<Cache::Handle*>(e);
640
104M
}
641
642
90.2M
bool LRUCache::HasFreeSpace(const SubCacheType subcache_type) {
643
90.2M
  switch (subcache_type) {
644
40.7M
    case SINGLE_TOUCH :
645
40.7M
      if (strict_capacity_limit_ || !FLAGS_cache_overflow_single_touch) {
646
216
        return single_touch_sub_cache_.Usage() <= (total_capacity_ - multi_touch_capacity_);
647
216
      }
648
40.7M
      return TotalUsage() <= total_capacity_;
649
49.5M
    case MULTI_TOUCH : return multi_touch_sub_cache_.Usage() <= multi_touch_capacity_;
650
90.2M
  }
651
0
  FATAL_INVALID_ENUM_VALUE(SubCacheType, subcache_type);
652
0
}
653
654
103M
void LRUCache::Release(Cache::Handle* handle) {
655
103M
  if (handle == nullptr) {
656
0
    return;
657
0
  }
658
103M
  LRUHandle* e = reinterpret_cast<LRUHandle*>(handle);
659
103M
  bool last_reference = false;
660
103M
  {
661
103M
    MutexLock l(&mutex_);
662
103M
    LRUSubCache* sub_cache = GetSubCache(e->GetSubCacheType());
663
103M
    last_reference = Unref(e);
664
103M
    if (last_reference) {
665
750
      sub_cache->DecrementUsage(e->charge);
666
750
    }
667
103M
    if (e->refs == 1 && e->in_cache) {
668
      // The item is still in cache, and nobody else holds a reference to it
669
90.2M
      if (!HasFreeSpace(e->GetSubCacheType())) {
670
        // The LRU list must be empty since the cache is full.
671
1.36M
        assert(sub_cache->IsLRUEmpty());
672
        // take this opportunity and remove the item
673
0
        table_.Remove(e->key(), e->hash);
674
1.36M
        e->in_cache = false;
675
1.36M
        Unref(e);
676
1.36M
        sub_cache->DecrementUsage(e->charge);
677
1.36M
        last_reference = true;
678
88.9M
      } else {
679
        // put the item on the list to be potentially freed.
680
88.9M
        LRU_Append(e);
681
88.9M
      }
682
90.2M
    }
683
103M
  }
684
685
  // free outside of mutex
686
103M
  if (last_reference) {
687
1.36M
    e->Free(metrics_.get());
688
1.36M
  }
689
103M
}
690
691
0
size_t LRUCache::Evict(size_t required) {
692
0
  LRUHandleDeleter evicted(metrics_.get());
693
0
  {
694
0
    MutexLock l(&mutex_);
695
0
    EvictFromLRU(required, &evicted, SINGLE_TOUCH);
696
0
    if (required > evicted.TotalCharge()) {
697
0
      EvictFromLRU(required, &evicted, MULTI_TOUCH);
698
0
    }
699
0
  }
700
0
  return evicted.TotalCharge();
701
0
}
702
703
Status LRUCache::Insert(const Slice& key, uint32_t hash, const QueryId query_id,
704
                        void* value, size_t charge, void (*deleter)(const Slice& key, void* value),
705
2.89M
                        Cache::Handle** handle, Statistics* statistics) {
706
  // Don't use the cache if disabled by the caller using the special query id.
707
2.89M
  if (query_id == kNoCacheQueryId) {
708
0
    return Status::OK();
709
0
  }
710
  // Allocate the memory here, outside of the mutex.
711
  // If the cache is full, we'll have to release it.
712
  // It shouldn't happen very often though.
713
2.89M
  LRUHandle* e = reinterpret_cast<LRUHandle*>(
714
2.89M
                    new char[sizeof(LRUHandle) - 1 + key.size()]);
715
2.89M
  Status s;
716
2.89M
  LRUHandleDeleter last_reference_list(metrics_.get());
717
718
2.89M
  e->value = value;
719
2.89M
  e->deleter = deleter;
720
2.89M
  e->charge = charge;
721
2.89M
  e->key_length = key.size();
722
2.89M
  e->hash = hash;
723
2.89M
  e->refs = (handle == nullptr
724
2.89M
                 ? 1
725
2.89M
                 : 2); // One from LRUCache, one for the returned handle
726
2.89M
  e->next = e->prev = nullptr;
727
2.89M
  e->in_cache = true;
728
  // Adding query id to the handle.
729
2.89M
  e->query_id = query_id;
730
2.89M
  memcpy(e->key_data, key.data(), key.size());
731
732
2.89M
  {
733
2.89M
    MutexLock l(&mutex_);
734
    // Free the space following strict LRU policy until enough space
735
    // is freed or the lru list is empty.
736
    // Check if there is a single touch cache.
737
2.89M
    SubCacheType subcache_type;
738
2.89M
    if (FLAGS_cache_single_touch_ratio == 0) {
739
100
      e->query_id = kInMultiTouchId;
740
100
      subcache_type = MULTI_TOUCH;
741
2.89M
    } else if (FLAGS_cache_single_touch_ratio == 1) {
742
      // If there is no multi touch cache, default to single cache.
743
100
      subcache_type = SINGLE_TOUCH;
744
2.89M
    } else {
745
2.89M
      subcache_type = table_.GetSubCacheTypeCandidate(e);
746
2.89M
    }
747
2.89M
    EvictFromLRU(charge, &last_reference_list, subcache_type);
748
2.89M
    LRUSubCache* sub_cache = GetSubCache(subcache_type);
749
    // Check if the cache no longer has any space in the given pool.
750
2.89M
    if (strict_capacity_limit_ &&
751
2.89M
        sub_cache->Usage() - sub_cache->LRU_Usage() + charge > GetSubCacheCapacity(subcache_type)) {
752
2
      if (handle == nullptr) {
753
1
        last_reference_list.Add(e);
754
1
      } else {
755
1
        delete[] reinterpret_cast<char*>(e);
756
1
        *handle = nullptr;
757
1
      }
758
2
      s = STATUS(Incomplete, "Insert failed due to LRU cache being full.");
759
2.89M
    } else {
760
      // insert into the cache
761
      // note that the cache might get larger than its capacity if not enough
762
      // space was freed
763
2.89M
      LRUHandle* old = table_.Insert(e);
764
2.89M
      sub_cache->IncrementUsage(e->charge);
765
2.89M
      if (old != nullptr) {
766
2.00k
        old->in_cache = false;
767
2.00k
        if (Unref(old)) {
768
1.26k
          DecrementUsage(old->GetSubCacheType(), old->charge);
769
          // old is on LRU because it's in cache and its reference count
770
          // was just 1 (Unref returned true, i.e. refs dropped to 0)
771
1.26k
          LRU_Remove(old);
772
1.26k
          last_reference_list.Add(old);
773
1.26k
        }
774
2.00k
      }
775
      // No external reference, so put it in LRU to be potentially evicted.
776
2.89M
      if (handle == nullptr) {
777
355k
        LRU_Append(e);
778
2.54M
      } else {
779
2.54M
        *handle = reinterpret_cast<Cache::Handle*>(e);
780
2.54M
      }
781
2.89M
      if (subcache_type == MULTI_TOUCH && FLAGS_cache_single_touch_ratio != 0) {
782
        // Evict entries from the single touch cache if the total size increases. This can happen if
783
        // single touch entries have overflowed and we insert entries directly into the multi touch
784
        // cache without them going through the single touch cache.
785
118k
        EvictFromLRU(0, &last_reference_list, SINGLE_TOUCH);
786
118k
      }
787
2.89M
      s = Status::OK();
788
2.89M
    }
789
2.89M
    if (statistics != nullptr) {
790
479k
      if (s.ok()) {
791
479k
        RecordTick(statistics, BLOCK_CACHE_ADD);
792
479k
        RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, charge);
793
479k
        if (subcache_type == SubCacheType::SINGLE_TOUCH) {
794
478k
          RecordTick(statistics, BLOCK_CACHE_SINGLE_TOUCH_ADD);
795
478k
          RecordTick(statistics, BLOCK_CACHE_SINGLE_TOUCH_BYTES_WRITE, charge);
796
478k
        } else if (subcache_type == SubCacheType::MULTI_TOUCH) {
797
988
          RecordTick(statistics, BLOCK_CACHE_MULTI_TOUCH_ADD);
798
988
          RecordTick(statistics, BLOCK_CACHE_MULTI_TOUCH_BYTES_WRITE, charge);
799
988
        }
800
18.4E
      } else {
801
18.4E
        RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
802
18.4E
      }
803
479k
    }
804
2.89M
    if (metrics_ != nullptr) {
805
466k
      if (subcache_type == MULTI_TOUCH) {
806
372
        metrics_->multi_touch_cache_usage->IncrementBy(charge);
807
465k
      } else {
808
465k
        metrics_->single_touch_cache_usage->IncrementBy(charge);
809
465k
      }
810
466k
      metrics_->cache_usage->IncrementBy(charge);
811
466k
    }
812
2.89M
  }
813
814
2.89M
  return s;
815
2.89M
}
816
817
49.7k
void LRUCache::Erase(const Slice& key, uint32_t hash) {
818
49.7k
  LRUHandle* e;
819
49.7k
  bool last_reference = false;
820
49.7k
  {
821
49.7k
    MutexLock l(&mutex_);
822
49.7k
    e = table_.Remove(key, hash);
823
49.7k
    if (e != nullptr) {
824
47.9k
      last_reference = Unref(e);
825
47.9k
      if (last_reference) {
826
47.9k
        DecrementUsage(e->GetSubCacheType(), e->charge);
827
47.9k
      }
828
47.9k
      if (last_reference && 
e->in_cache47.9k
) {
829
47.9k
        LRU_Remove(e);
830
47.9k
      }
831
47.9k
      e->in_cache = false;
832
47.9k
    }
833
49.7k
  }
834
  // mutex not held here
835
  // last_reference will only be true if e != nullptr
836
49.7k
  if (last_reference) {
837
47.9k
    e->Free(metrics_.get());
838
47.9k
  }
839
49.7k
}
840
841
static int kNumShardBits = 4;          // default value, can be overridden
842
843
class ShardedLRUCache : public Cache {
844
 private:
845
  LRUCache* shards_;
846
  port::Mutex id_mutex_;
847
  port::Mutex capacity_mutex_;
848
  uint64_t last_id_;
849
  size_t num_shard_bits_;
850
  size_t capacity_;
851
  bool strict_capacity_limit_;
852
  shared_ptr<yb::CacheMetrics> metrics_;
853
854
107M
  static inline uint32_t HashSlice(const Slice& s) {
855
107M
    return Hash(s.data(), s.size(), 0);
856
107M
  }
857
858
210M
  uint32_t Shard(uint32_t hash) {
859
    // Note, hash >> 32 yields hash in gcc, not the zero we expect!
860
210M
    return (num_shard_bits_ > 0) ? (hash >> (32 - num_shard_bits_)) : 0;
861
210M
  }
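
A standalone check of the shard selection above: with num_shard_bits == 4 (the kNumShardBits default), the top four bits of the hash choose one of 16 shards. The hash constant is an arbitrary example:

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const size_t num_shard_bits = 4;                        // kNumShardBits default
  const uint32_t hash = 0xDEADBEEFu;                      // arbitrary example
  const uint32_t shard = hash >> (32 - num_shard_bits);   // top 4 bits
  assert(shard == 0xD);                                   // 0xDEADBEEF >> 28 == 13
  return 0;
}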
862
863
107M
  bool IsValidQueryId(const QueryId query_id) {
864
107M
    return query_id >= 0 || query_id == kInMultiTouchId || query_id == kNoCacheQueryId;
865
107M
  }
866
867
 public:
868
  ShardedLRUCache(size_t capacity, int num_shard_bits,
869
                  bool strict_capacity_limit)
870
      : last_id_(0),
871
        num_shard_bits_(num_shard_bits),
872
        capacity_(capacity),
873
        strict_capacity_limit_(strict_capacity_limit),
874
6.65M
        metrics_(nullptr) {
875
6.65M
    int num_shards = 1 << num_shard_bits_;
876
6.65M
    shards_ = new LRUCache[num_shards];
877
6.65M
    const size_t per_shard = (capacity + (num_shards - 1)) / num_shards;
878
135M
    for (int s = 0; s < num_shards; s++) {
879
128M
      shards_[s].SetStrictCapacityLimit(strict_capacity_limit);
880
128M
      shards_[s].SetCapacity(per_shard);
881
128M
    }
882
6.65M
  }
883
884
6.56M
  virtual ~ShardedLRUCache() {
885
6.56M
    delete[] shards_;
886
6.56M
  }
887
888
3
  void SetCapacity(size_t capacity) override {
889
3
    int num_shards = 1 << num_shard_bits_;
890
3
    const size_t per_shard = (capacity + (num_shards - 1)) / num_shards;
891
3
    MutexLock l(&capacity_mutex_);
892
6
    for (int s = 0; s < num_shards; s++) {
893
3
      shards_[s].SetCapacity(per_shard);
894
3
    }
895
3
    capacity_ = capacity;
896
3
  }
897
898
  virtual Status Insert(const Slice& key, const QueryId query_id, void* value, size_t charge,
899
                        void (*deleter)(const Slice& key, void* value),
900
2.89M
                        Handle** handle, Statistics* statistics) override {
901
2.89M
    DCHECK(IsValidQueryId(query_id));
902
    // Queries with no cache query ids are not cached.
903
2.89M
    if (query_id == kNoCacheQueryId) {
904
0
      return Status::OK();
905
0
    }
906
2.89M
    const uint32_t hash = HashSlice(key);
907
2.89M
    return shards_[Shard(hash)].Insert(key, hash, query_id, value, charge, deleter,
908
2.89M
                                       handle, statistics);
909
2.89M
  }
910
911
0
  size_t Evict(size_t bytes_to_evict) override {
912
0
    auto num_shards = 1ULL << num_shard_bits_;
913
0
    size_t total_evicted = 0;
914
    // Start at a random shard.
915
0
    auto index = Shard(yb::RandomUniformInt<uint32_t>());
916
0
    for (size_t i = 0; bytes_to_evict > total_evicted && i != num_shards; ++i) {
917
0
      total_evicted += shards_[index].Evict(bytes_to_evict - total_evicted);
918
0
      index = (index + 1) & (num_shards - 1);
919
0
    }
920
0
    return total_evicted;
921
0
  }
922
923
104M
  Handle* Lookup(const Slice& key, const QueryId query_id, Statistics* statistics) override {
924
104M
    DCHECK(IsValidQueryId(query_id));
925
104M
    if (query_id == kNoCacheQueryId) {
926
0
      return nullptr;
927
0
    }
928
104M
    const uint32_t hash = HashSlice(key);
929
104M
    return shards_[Shard(hash)].Lookup(key, hash, query_id, statistics);
930
104M
  }
931
932
103M
  void Release(Handle* handle) override {
933
103M
    LRUHandle* h = reinterpret_cast<LRUHandle*>(handle);
934
103M
    shards_[Shard(h->hash)].Release(handle);
935
103M
  }
936
937
49.7k
  void Erase(const Slice& key) override {
938
49.7k
    const uint32_t hash = HashSlice(key);
939
49.7k
    shards_[Shard(hash)].Erase(key, hash);
940
49.7k
  }
941
942
100M
  void* Value(Handle* handle) override {
943
100M
    return reinterpret_cast<LRUHandle*>(handle)->value;
944
100M
  }
945
946
151k
  uint64_t NewId() override {
947
151k
    MutexLock l(&id_mutex_);
948
151k
    return ++(last_id_);
949
151k
  }
950
951
16.6k
  size_t GetCapacity() const override { return capacity_; }
952
953
0
  bool HasStrictCapacityLimit() const override {
954
0
    return strict_capacity_limit_;
955
0
  }
956
957
124
  size_t GetUsage() const override {
958
    // We will not lock the cache when getting the usage from shards.
959
124
    int num_shards = 1 << num_shard_bits_;
960
124
    size_t usage = 0;
961
26.1k
    for (int s = 0; s < num_shards; s++) {
962
26.0k
      usage += shards_[s].GetUsage();
963
26.0k
    }
964
124
    return usage;
965
124
  }
966
967
0
  size_t GetUsage(Handle* handle) const override {
968
0
    return reinterpret_cast<LRUHandle*>(handle)->charge;
969
0
  }
970
971
186
  size_t GetPinnedUsage() const override {
972
    // We will not lock the cache when getting the usage from shards.
973
186
    int num_shards = 1 << num_shard_bits_;
974
186
    size_t usage = 0;
975
46.7k
    for (int s = 0; s < num_shards; s++) {
976
46.5k
      usage += shards_[s].GetPinnedUsage();
977
46.5k
    }
978
186
    return usage;
979
186
  }
980
981
205
  SubCacheType GetSubCacheType(Handle* e) const override {
982
205
    LRUHandle* h = reinterpret_cast<LRUHandle*>(e);
983
205
    return h->GetSubCacheType();
984
205
  }
985
986
0
  void DisownData() override {
987
0
    shards_ = nullptr;
988
0
  }
989
990
  virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
991
1
                                      bool thread_safe) override {
992
1
    int num_shards = 1 << num_shard_bits_;
993
17
    for (int s = 0; s < num_shards; s++) {
994
16
      shards_[s].ApplyToAllCacheEntries(callback, thread_safe);
995
16
    }
996
1
  }
997
998
17.2k
  virtual void SetMetrics(const scoped_refptr<yb::MetricEntity>& entity) override {
999
17.2k
    int num_shards = 1 << num_shard_bits_;
1000
17.2k
    metrics_ = std::make_shared<yb::CacheMetrics>(entity);
1001
292k
    for (int s = 0; s < num_shards; s++) {
1002
275k
      shards_[s].SetMetrics(metrics_);
1003
275k
    }
1004
17.2k
  }
1005
1006
7
  virtual std::vector<std::pair<size_t, size_t>> TEST_GetIndividualUsages() override {
1007
7
    std::vector<std::pair<size_t, size_t>> cache_sizes;
1008
7
    cache_sizes.reserve(1 << num_shard_bits_);
1009
1010
14
    for (int i = 0; i < 1 << num_shard_bits_; ++i) {
1011
7
      cache_sizes.emplace_back(shards_[i].TEST_GetIndividualUsages());
1012
7
    }
1013
7
    return cache_sizes;
1014
7
  }
1015
};
1016
1017
}  // end anonymous namespace
1018
1019
6.20M
shared_ptr<Cache> NewLRUCache(size_t capacity) {
1020
6.20M
  return NewLRUCache(capacity, kNumShardBits, false);
1021
6.20M
}
1022
1023
452k
shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits) {
1024
452k
  return NewLRUCache(capacity, num_shard_bits, false);
1025
452k
}
1026
1027
shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits,
1028
6.65M
                              bool strict_capacity_limit) {
1029
6.65M
  if (num_shard_bits >= 20) {
1030
0
    return nullptr;  // the cache cannot be sharded into too many fine pieces
1031
0
  }
1032
6.65M
  return std::make_shared<ShardedLRUCache>(capacity, num_shard_bits,
1033
6.65M
                                           strict_capacity_limit);
1034
6.65M
}
1035
1036
}  // namespace rocksdb
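
Finally, a hedged usage sketch of the public API covered by this report (NewLRUCache, Insert, Lookup, Value, Release). The key, value, charge, and query ids are invented for illustration, and Slice is assumed to be constructible from a string literal, as elsewhere in RocksDB:

#include <cassert>
#include <memory>

#include "yb/rocksdb/cache.h"

// Deleter matching the Cache::Insert signature; frees the example value.
static void DeleteInt(const rocksdb::Slice& /* key */, void* value) {
  delete static_cast<int*>(value);
}

int main() {
  std::shared_ptr<rocksdb::Cache> cache = rocksdb::NewLRUCache(1 << 20);
  rocksdb::Cache::Handle* handle = nullptr;
  // First touch: the entry lands in the single-touch pool.
  auto s = cache->Insert("key", /* query_id = */ 1, new int(42), sizeof(int),
                         &DeleteInt, &handle, /* statistics = */ nullptr);
  assert(s.ok() && handle != nullptr);
  assert(*static_cast<int*>(cache->Value(handle)) == 42);
  cache->Release(handle);
  // A touch from a different query id promotes the entry to the multi-touch
  // pool, per LRUCache::Lookup above.
  handle = cache->Lookup("key", /* query_id = */ 2, nullptr);
  if (handle != nullptr) {
    cache->Release(handle);
  }
  return 0;
}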