YugabyteDB (2.13.0.0-b42, bfc6a6643e7399ac8a0e81d06a3ee6d6571b33ab)

Coverage Report

Created: 2022-03-09 17:30

/Users/deen/code/yugabyte-db/src/yb/rocksdb/cache.h
Execution counts recorded by the coverage run are shown inline as "// coverage count: N" annotations on the corresponding source lines.
// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// The following only applies to changes made to this file as part of YugaByte development.
//
// Portions Copyright (c) YugaByte, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License.  You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied.  See the License for the specific language governing permissions and limitations
// under the License.
//
// A Cache is an interface that maps keys to values.  It has internal
// synchronization and may be safely accessed concurrently from
// multiple threads.  It may automatically evict entries to make room
// for new entries.  Values have a specified charge against the cache
// capacity.  For example, a cache where the values are variable-length
// strings may use the length of the string as the charge for
// the string.
//
// A builtin cache implementation with a least-recently-used eviction
// policy is provided.  Clients may use their own implementations if
// they want something more sophisticated (like scan-resistance, a
// custom eviction policy, variable cache sizing, etc.)

#ifndef YB_ROCKSDB_CACHE_H
#define YB_ROCKSDB_CACHE_H

#include <stdint.h>

#include <memory>

#include "yb/rocksdb/statistics.h"
#include "yb/rocksdb/status.h"

#include "yb/util/slice.h"

namespace rocksdb {

using std::shared_ptr;

// Classifies the type of the subcache.
enum SubCacheType {
  SINGLE_TOUCH,
  MULTI_TOUCH
};

class Cache;

// Create a new cache with a fixed size capacity. The cache is sharded
// to 2^num_shard_bits shards, by hash of the key. The total capacity
// is divided and evenly assigned to each shard.
//
// The parameter num_shard_bits defaults to 4, and strict_capacity_limit
// defaults to false.
extern shared_ptr<Cache> NewLRUCache(size_t capacity);
extern shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits);
extern shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits,
                                     bool strict_capacity_limit);
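
// Example (illustrative sketch): constructing a sharded LRU cache with the
// factory above; the capacity and shard count are arbitrary values chosen
// for illustration.
//
//   std::shared_ptr<rocksdb::Cache> block_cache =
//       rocksdb::NewLRUCache(64 * 1024 * 1024 /* capacity in bytes */,
//                            4 /* num_shard_bits => 2^4 = 16 shards */,
//                            false /* strict_capacity_limit */);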

using QueryId = int64_t;
// Query ids to represent values for the default query id.
constexpr QueryId kDefaultQueryId = 0;
// Query ids to represent values that should be in multi-touch cache.
constexpr QueryId kInMultiTouchId = -1;
// Query ids to represent values that should not be in any cache.
constexpr QueryId kNoCacheQueryId = -2;
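
// Example (illustrative sketch): the query id selects the subcache behavior
// when passed to Cache::Insert()/Lookup() below; 'cache', 'key', 'value',
// 'charge' and 'Deleter' are placeholders.
//
//   cache->Insert(key, kInMultiTouchId, value, charge, &Deleter);  // keep in multi-touch cache
//   cache->Insert(key, kNoCacheQueryId, value, charge, &Deleter);  // do not cache this value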

class Cache {
 public:
  Cache() { }  // coverage count: 5.21M

  // Destroys all existing entries by calling the "deleter"
  // function that was passed via the Insert() function.
  //
  // @See Insert
  virtual ~Cache();

  // Opaque handle to an entry stored in the cache.
  struct Handle { };

  // Insert a mapping from key->value into the cache and assign it
  // the specified charge against the total cache capacity.
  // If strict_capacity_limit is true and the cache reaches its full capacity,
  // return Status::Incomplete.
  //
  // If handle is not nullptr, returns a handle that corresponds to the
  // mapping. The caller must call this->Release(handle) when the returned
  // mapping is no longer needed. In case of error, the caller is responsible
  // for cleaning up the value (i.e. calling "deleter").
  //
  // If handle is nullptr, it is as if Release is called immediately after
  // insert. In case of error, the value will be cleaned up.
  //
  // When the inserted entry is no longer needed, the key and
  // value will be passed to "deleter".
  // The query id allows the cache value to be placed in either the
  // single-touch or multi-touch cache, which gives the cache scan
  // resistance.
  virtual Status Insert(const Slice& key, const QueryId query_id,
                        void* value, size_t charge,
                        void (*deleter)(const Slice& key, void* value),
                        Handle** handle = nullptr,
                        Statistics* statistics = nullptr) = 0;
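
  // Example (illustrative sketch): inserting a heap-allocated value with a
  // matching deleter; 'MyValue' and the charge are placeholders. On error with
  // a non-null handle, the caller cleans up the value itself, as noted above.
  //
  //   static void DeleteMyValue(const Slice& /* key */, void* value) {
  //     delete static_cast<MyValue*>(value);
  //   }
  //   ...
  //   Cache::Handle* handle = nullptr;
  //   Status s = cache->Insert(key, kDefaultQueryId, new MyValue(),
  //                            sizeof(MyValue), &DeleteMyValue, &handle);
  //   if (s.ok() && handle != nullptr) {
  //     // ... use cache->Value(handle) ...
  //     cache->Release(handle);
  //   }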

  // If the cache has no mapping for "key", returns nullptr.
  //
  // Else return a handle that corresponds to the mapping.  The caller
  // must call this->Release(handle) when the returned mapping is no
  // longer needed.
  virtual Handle* Lookup(const Slice& key, const QueryId query_id,
                         Statistics* statistics = nullptr) = 0;

  // Release a mapping returned by a previous Lookup().
  // REQUIRES: handle must not have been released yet.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual void Release(Handle* handle) = 0;

  // Return the value encapsulated in a handle returned by a
  // successful Lookup().
  // REQUIRES: handle must not have been released yet.
  // REQUIRES: handle must have been returned by a method on *this.
  virtual void* Value(Handle* handle) = 0;
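
  // Example (illustrative sketch): the typical Lookup()/Value()/Release()
  // pattern; 'MyValue' stands in for whatever type was inserted.
  //
  //   Cache::Handle* h = cache->Lookup(key, kDefaultQueryId);
  //   if (h != nullptr) {
  //     auto* v = static_cast<MyValue*>(cache->Value(h));
  //     // ... read from v ...
  //     cache->Release(h);
  //   }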

  // If the cache contains an entry for key, erase it.  Note that the
  // underlying entry will be kept around until all existing handles
  // to it have been released.
  virtual void Erase(const Slice& key) = 0;

  // Return a new numeric id.  May be used by multiple clients who are
  // sharing the same cache to partition the key space.  Typically the
  // client will allocate a new id at startup and prepend the id to
  // its cache keys.
  virtual uint64_t NewId() = 0;
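
  // Example (illustrative sketch): partitioning a shared cache's key space by
  // prefixing keys with an id from NewId(); the encoding shown is just one
  // possible scheme.
  //
  //   const uint64_t id = cache->NewId();  // obtained once, at client startup
  //   std::string cache_key;
  //   cache_key.append(reinterpret_cast<const char*>(&id), sizeof(id));
  //   cache_key.append(user_key.data(), user_key.size());
  //   // pass Slice(cache_key) as the key to Insert()/Lookup()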

  // Sets the maximum configured capacity of the cache. When the new
  // capacity is less than the old capacity and the existing usage is
  // greater than the new capacity, the implementation will do its best to
  // purge released entries from the cache in order to lower the usage.
  virtual void SetCapacity(size_t capacity) = 0;

  // Returns whether an error is returned on insertion when the cache
  // reaches its full capacity.
  virtual bool HasStrictCapacityLimit() const = 0;

  // returns the maximum configured capacity of the cache
  virtual size_t GetCapacity() const = 0;

  // returns the memory size for the entries residing in the cache.
  virtual size_t GetUsage() const = 0;

  // returns the memory size for a specific entry in the cache.
  virtual size_t GetUsage(Handle* handle) const = 0;

  // returns the memory size for the entries in use by the system
  virtual size_t GetPinnedUsage() const = 0;
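
  // Example (illustrative sketch): reading back capacity and usage figures;
  // the logging call is only a placeholder.
  //
  //   printf("capacity=%zu usage=%zu pinned=%zu\n",
  //          cache->GetCapacity(), cache->GetUsage(), cache->GetPinnedUsage());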

  // Gets the sub_cache type of the handle.
  virtual SubCacheType GetSubCacheType(Handle* e) const {  // coverage count: 0
    // Default implementation assumes multi-touch.
    return MULTI_TOUCH;
  }

  // Call this on shutdown if you want to speed it up. Cache will disown
  // any underlying data and will not free it on delete. This call will leak
  // memory - call this only if you're shutting down the process.
  // Any attempt to use the cache after this call will fail terribly.
  // Always delete the DB object before calling this method!
  virtual void DisownData() {  // coverage count: 0
    // default implementation is noop
  }

  // Apply the callback to all entries in the cache.
  // If thread_safe is true, accesses are made under the cache lock; otherwise
  // the cache is accessed without the lock held.
  virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
                                      bool thread_safe) = 0;
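
  // Example (illustrative sketch): a callback that tallies the total charge of
  // all entries, assuming the size_t argument passed to the callback is the
  // entry's charge.
  //
  //   static size_t total_charge = 0;
  //   static void AddCharge(void* /* value */, size_t charge) {
  //     total_charge += charge;
  //   }
  //   ...
  //   cache->ApplyToAllCacheEntries(&AddCharge, true /* thread_safe */);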

  virtual void SetMetrics(const scoped_refptr<yb::MetricEntity>& entity) = 0;

  // Tries to evict the specified number of bytes from the cache.
  virtual size_t Evict(size_t required) { return 0; }  // coverage count: 0

  // Returns the single-touch and multi-touch cache usages for each of the shards.
  virtual std::vector<std::pair<size_t, size_t>> TEST_GetIndividualUsages() = 0;

 private:
  void LRU_Remove(Handle* e);
  void LRU_Append(Handle* e);
  void Unref(Handle* e);

  // No copying allowed
  Cache(const Cache&);
  void operator=(const Cache&);
};

}  // namespace rocksdb

#endif  // YB_ROCKSDB_CACHE_H