/Users/deen/code/yugabyte-db/src/yb/rocksdb/util/cache_test.cc
Line | Count | Source (jump to first uncovered line) |
1 | | // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. |
2 | | // This source code is licensed under the BSD-style license found in the |
3 | | // LICENSE file in the root directory of this source tree. An additional grant |
4 | | // of patent rights can be found in the PATENTS file in the same directory. |
5 | | // |
6 | | // The following only applies to changes made to this file as part of YugaByte development. |
7 | | // |
8 | | // Portions Copyright (c) YugaByte, Inc. |
9 | | // |
10 | | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except |
11 | | // in compliance with the License. You may obtain a copy of the License at |
12 | | // |
13 | | // http://www.apache.org/licenses/LICENSE-2.0 |
14 | | // |
15 | | // Unless required by applicable law or agreed to in writing, software distributed under the License |
16 | | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express |
17 | | // or implied. See the License for the specific language governing permissions and limitations |
18 | | // under the License. |
19 | | // |
20 | | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. |
21 | | // Use of this source code is governed by a BSD-style license that can be |
22 | | // found in the LICENSE file. See the AUTHORS file for names of contributors. |
23 | | |
24 | | #include <forward_list> |
25 | | #include <string> |
26 | | #include <vector> |
27 | | |
28 | | #include <gtest/gtest.h> |
29 | | |
30 | | #include "yb/rocksdb/cache.h" |
31 | | #include "yb/rocksdb/util/coding.h" |
32 | | |
33 | | #include "yb/util/string_util.h" |
34 | | #include "yb/util/test_macros.h" |
35 | | #include "yb/rocksdb/util/testutil.h" |
36 | | |
37 | | DECLARE_double(cache_single_touch_ratio); |
38 | | |
39 | | namespace rocksdb { |
40 | | |
41 | | // Conversions between numeric keys/values and the types expected by Cache. |
42 | 8.01k | static std::string EncodeKey(int k) { |
43 | 8.01k | std::string result; |
44 | 8.01k | PutFixed32(&result, k); |
45 | 8.01k | return result; |
46 | 8.01k | } |
47 | 3.90k | static int DecodeKey(const Slice& k) { |
48 | 3.90k | assert(k.size() == 4); |
49 | 3.90k | return DecodeFixed32(k.data()); |
50 | 3.90k | } |
// Packs a numeric value directly into a void* so the cache can store it
// without any heap allocation.
static void* EncodeValue(uintptr_t v) {
  return reinterpret_cast<void*>(v);
}
// Unpacks a void* produced by EncodeValue back into its int value.
static int DecodeValue(void* v) {
  const auto raw = reinterpret_cast<uintptr_t>(v);
  return static_cast<int>(raw);
}
55 | | |
// Test fixture wrapping two LRU caches. Keys and values are ints encoded via
// EncodeKey/EncodeValue; Deleter records every deleted (key, value) pair on
// the current fixture so tests can assert on deletion order.
class CacheTest : public RocksDBTest {
 public:
  // Points at the fixture currently under test. Needed because Deleter is a
  // plain function pointer handed to the cache and cannot capture state.
  static CacheTest* current_;

  // Cache deleter callback: logs the deleted key/value into the current test.
  static void Deleter(const Slice& key, void* v) {
    current_->deleted_keys_.push_back(DecodeKey(key));
    current_->deleted_values_.push_back(DecodeValue(v));
  }

  // Primary cache dimensions.
  static const int kCacheSize = 1000;
  static const int kNumShardBits = 4;

  // Secondary cache dimensions.
  static const int kCacheSize2 = 100;
  static const int kNumShardBits2 = 2;

  // Default query id used by the helpers below.
  static const QueryId kTestQueryId = 1;

  // Parallel logs of keys/values passed to Deleter, in deletion order.
  std::vector<int> deleted_keys_;
  std::vector<int> deleted_values_;
  shared_ptr<Cache> cache_;   // primary cache (kCacheSize, kNumShardBits)
  shared_ptr<Cache> cache2_;  // secondary cache (kCacheSize2, kNumShardBits2)

  CacheTest() :
      cache_(NewLRUCache(kCacheSize, kNumShardBits)),
      cache2_(NewLRUCache(kCacheSize2, kNumShardBits2)) {
    current_ = this;
  }

  ~CacheTest() {
  }

  // Looks up 'key' in 'cache'; returns the decoded value, or -1 on a miss.
  // Releases the handle before returning.
  int Lookup(shared_ptr<Cache> cache, int key, QueryId query_id = kTestQueryId) {
    Cache::Handle* handle = cache->Lookup(EncodeKey(key), query_id);
    const int r = (handle == nullptr) ? -1 : DecodeValue(cache->Value(handle));
    if (handle != nullptr) {
      cache->Release(handle);
    }
    return r;
  }

  // Returns true iff 'key' is present with 'expected_value' AND currently
  // lives in the multi-touch sub-cache.
  bool LookupAndCheckInMultiTouch(shared_ptr<Cache> cache, int key,
                                  int expected_value,
                                  QueryId query_id = kTestQueryId) {
    Cache::Handle* handle = cache->Lookup(EncodeKey(key), query_id);
    if (handle != nullptr) {
      const int r = DecodeValue(cache->Value(handle));
      SubCacheType subcache_type = cache->GetSubCacheType(handle);
      cache->Release(handle);
      return (subcache_type == MULTI_TOUCH && r == expected_value);
    } else {
      return false;
    }
  }

  // Inserts (key, value) with the given charge under 'query_id', routing
  // deletions to CacheTest::Deleter.
  Status Insert(shared_ptr<Cache> cache, int key, int value, int charge = 1,
                QueryId query_id = kTestQueryId) {
    return cache->Insert(EncodeKey(key), query_id, EncodeValue(value), charge,
                         &CacheTest::Deleter);
  }

  void Erase(shared_ptr<Cache> cache, int key) {
    cache->Erase(EncodeKey(key));
  }

  // Convenience overloads that operate on the primary cache_.
  int Lookup(int key, QueryId query_id = kTestQueryId) {
    return Lookup(cache_, key, query_id);
  }

  bool LookupAndCheckInMultiTouch(int key, int expected_value, QueryId query_id = kTestQueryId) {
    return LookupAndCheckInMultiTouch(cache_, key, expected_value, query_id);
  }

  Status Insert(int key, int value, int charge = 1, QueryId query_id = kTestQueryId) {
    return Insert(cache_, key, value, charge, query_id);
  }

  void Erase(int key) {
    Erase(cache_, key);
  }

  // Convenience helpers that operate on the secondary cache2_.
  int Lookup2(int key) {
    return Lookup(cache2_, key);
  }

  Status Insert2(int key, int value, int charge = 1) {
    return Insert(cache2_, key, value, charge);
  }

  void Erase2(int key) {
    Erase(cache2_, key);
  }
};
CacheTest* CacheTest::current_;
150 | | |
151 | | namespace { |
152 | 300k | void dumbDeleter(const Slice& key, void* value) { } |
153 | | } // namespace |
154 | | |
// Verifies Cache::GetUsage(): usage grows by exactly the inserted charge, and
// once the cache is overloaded it stays close to, but under, capacity.
TEST_F(CacheTest, UsageTest) {
  // cache is shared_ptr and will be automatically cleaned up.
  const uint64_t kCapacity = 100000;
  auto cache = NewLRUCache(kCapacity, 8);

  size_t usage = 0;
  char value[10] = "abcdef";
  // make sure everything will be cached
  for (int i = 1; i < 100; ++i) {
    std::string key(i, 'a');
    auto kv_size = key.size() + 5;  // charge approximates key + value footprint
    ASSERT_OK(cache->Insert(key, kTestQueryId, reinterpret_cast<void*>(value),
                            kv_size, dumbDeleter));
    usage += kv_size;
    ASSERT_EQ(usage, cache->GetUsage());
  }

  // make sure the cache will be overloaded
  for (uint64_t i = 1; i < kCapacity; ++i) {
    auto key = ToString(i);
    ASSERT_OK(cache->Insert(
        key, kTestQueryId, reinterpret_cast<void*>(value), key.size() + 5, dumbDeleter));
  }

  // the usage should be close to the capacity
  ASSERT_GT(kCapacity, cache->GetUsage());
  ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
}
183 | | |
// Verifies Cache::GetPinnedUsage(): pinned usage tracks exactly the entries
// with outstanding handles, is unaffected by eviction pressure, and drops as
// handles are released.
TEST_F(CacheTest, PinnedUsageTest) {
  // cache is shared_ptr and will be automatically cleaned up.
  const uint64_t kCapacity = 100000;
  // Scale total capacity so the single-touch portion alone can hold kCapacity.
  auto cache = NewLRUCache(kCapacity / FLAGS_cache_single_touch_ratio, 8);

  size_t pinned_usage = 0;
  char value[10] = "abcdef";

  std::forward_list<Cache::Handle*> unreleased_handles;

  // Add entries. Unpin some of them after insertion. Then, pin some of them
  // again. Check GetPinnedUsage().
  for (int i = 1; i < 100; ++i) {
    std::string key(i, 'a');
    auto kv_size = key.size() + 5;
    Cache::Handle* handle;
    ASSERT_OK(cache->Insert(
        key, kTestQueryId, reinterpret_cast<void*>(value), kv_size, dumbDeleter, &handle));
    pinned_usage += kv_size;
    ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
    if (i % 2 == 0) {
      // Unpin even entries right away.
      cache->Release(handle);
      pinned_usage -= kv_size;
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
    } else {
      // Keep odd entries pinned; released at the end of the test.
      unreleased_handles.push_front(handle);
    }
    if (i % 3 == 0) {
      unreleased_handles.push_front(cache->Lookup(key, kTestQueryId));
      // If i % 2 == 0, then the entry was unpinned before Lookup, so pinned
      // usage increased
      if (i % 2 == 0) {
        pinned_usage += kv_size;
      }
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
    }
  }

  // check that overloading the cache does not change the pinned usage
  for (uint64_t i = 1; i < 2 * kCapacity; ++i) {
    auto key = ToString(i);
    ASSERT_OK(cache->Insert(
        key, kTestQueryId, reinterpret_cast<void*>(value), key.size() + 5, dumbDeleter));
  }
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());

  // release handles for pinned entries to prevent memory leaks
  for (auto handle : unreleased_handles) {
    cache->Release(handle);
  }
}
235 | | |
// Basic hit/miss semantics, plus overwrite: re-inserting an existing key
// replaces the value and hands the old entry to the deleter.
TEST_F(CacheTest, HitAndMiss) {
  ASSERT_EQ(-1, Lookup(100));

  ASSERT_OK(Insert(100, 101));
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  ASSERT_OK(Insert(200, 201));
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  // Overwrite key 100; the old (100, 101) entry must be deleted.
  ASSERT_OK(Insert(100, 102));
  ASSERT_EQ(102, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
}
258 | | |
259 | | |
// Erase removes a present entry (invoking the deleter exactly once) and is a
// harmless no-op for absent keys.
TEST_F(CacheTest, Erase) {
  // Erasing a key that was never inserted must not invoke the deleter.
  Erase(200);
  ASSERT_EQ(0U, deleted_keys_.size());

  ASSERT_OK(Insert(100, 101));
  ASSERT_OK(Insert(200, 201));
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);

  // A second erase of the same key is a no-op: no additional deletion.
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
}
278 | | |
// Replaced or erased entries stay alive (and charged to usage) while handles
// reference them; the deleter fires only on the final Release.
TEST_F(CacheTest, EntriesArePinned) {
  ASSERT_OK(Insert(100, 101));
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100), kTestQueryId);
  ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1U, cache_->GetUsage());

  // Overwrite the key. The old entry is pinned by h1, so it is not deleted
  // yet and still counts toward usage.
  ASSERT_OK(Insert(100, 102));
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100), kTestQueryId);
  ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
  ASSERT_EQ(0U, deleted_keys_.size());
  ASSERT_EQ(2U, cache_->GetUsage());

  // Releasing the last reference to the replaced entry triggers its deleter.
  cache_->Release(h1);
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
  ASSERT_EQ(1U, cache_->GetUsage());

  // Erasing the live entry removes it from the table, but h2 keeps it alive.
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(1U, cache_->GetUsage());

  cache_->Release(h2);
  ASSERT_EQ(2U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[1]);
  ASSERT_EQ(102, deleted_values_[1]);
  ASSERT_EQ(0U, cache_->GetUsage());
}
308 | | |
// An entry that is looked up on every iteration survives the insertion of far
// more entries than capacity, while an untouched entry is evicted.
TEST_F(CacheTest, EvictionPolicy) {
  ASSERT_OK(Insert(100, 101));
  ASSERT_OK(Insert(200, 201));

  // Frequently used entry must be kept around
  for (int i = 0; i < kCacheSize + 100; i++) {
    ASSERT_OK(Insert(1000 + i, 2000 + i));
    ASSERT_EQ(2000 + i, Lookup(1000 + i));
    ASSERT_EQ(101, Lookup(100));  // keeps key 100 hot on every iteration
  }
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));  // never touched again, so evicted
}
322 | | |
// Entries pinned by outstanding handles (keys 200..203) survive heavy insert
// pressure; equally old unpinned entries (100..103, 300..303) are evicted.
TEST_F(CacheTest, EvictionPolicyRef) {
  ASSERT_OK(Insert(100, 101));
  ASSERT_OK(Insert(101, 102));
  ASSERT_OK(Insert(102, 103));
  ASSERT_OK(Insert(103, 104));
  ASSERT_OK(Insert(200, 101));
  ASSERT_OK(Insert(201, 102));
  ASSERT_OK(Insert(202, 103));
  ASSERT_OK(Insert(203, 104));
  // Pin keys 200..203 by holding handles across the pressure loop.
  Cache::Handle* h201 = cache_->Lookup(EncodeKey(200), kTestQueryId);
  Cache::Handle* h202 = cache_->Lookup(EncodeKey(201), kTestQueryId);
  Cache::Handle* h203 = cache_->Lookup(EncodeKey(202), kTestQueryId);
  Cache::Handle* h204 = cache_->Lookup(EncodeKey(203), kTestQueryId);
  ASSERT_OK(Insert(300, 101));
  ASSERT_OK(Insert(301, 102));
  ASSERT_OK(Insert(302, 103));
  ASSERT_OK(Insert(303, 104));

  // Insert entries much more than Cache capacity
  for (int i = 0; i < kCacheSize + 100; i++) {
    ASSERT_OK(Insert(1000 + i, 2000 + i));
  }

  // Check whether the entries inserted in the beginning
  // are evicted. Ones without extra ref are evicted and
  // those with are not.
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(-1, Lookup(101));
  ASSERT_EQ(-1, Lookup(102));
  ASSERT_EQ(-1, Lookup(103));

  ASSERT_EQ(-1, Lookup(300));
  ASSERT_EQ(-1, Lookup(301));
  ASSERT_EQ(-1, Lookup(302));
  ASSERT_EQ(-1, Lookup(303));

  ASSERT_EQ(101, Lookup(200));
  ASSERT_EQ(102, Lookup(201));
  ASSERT_EQ(103, Lookup(202));
  ASSERT_EQ(104, Lookup(203));

  // Cleaning up all the handles
  cache_->Release(h201);
  cache_->Release(h202);
  cache_->Release(h203);
  cache_->Release(h204);
}
370 | | |
// Two lookups of the same key return the same handle; erasing the key makes
// it unreachable via Lookup while both handles remain valid until released.
TEST_F(CacheTest, ErasedHandleState) {
  // insert a key and get two handles
  ASSERT_OK(Insert(100, 1000));
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100), kTestQueryId);
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100), kTestQueryId);
  ASSERT_EQ(h1, h2);
  ASSERT_EQ(DecodeValue(cache_->Value(h1)), 1000);
  ASSERT_EQ(DecodeValue(cache_->Value(h2)), 1000);

  // delete the key from the cache
  Erase(100);
  // can no longer find in the cache
  ASSERT_EQ(-1, Lookup(100));

  // release one handle
  cache_->Release(h1);
  // still can't find in cache
  ASSERT_EQ(-1, Lookup(100));

  cache_->Release(h2);
}
392 | | |
393 | 1 | TEST_F(CacheTest, EvictionPolicyAllSingleTouch) { |
394 | 1 | FLAGS_cache_single_touch_ratio = 1; |
395 | 1 | const int kCapacity = 100; |
396 | 1 | auto cache = NewLRUCache(kCapacity, 0, true); |
397 | 1 | Status s; |
398 | 101 | for (int i = 0; i < kCapacity; i++) { |
399 | 100 | ASSERT_OK(Insert(cache, i, i + 1, kTestQueryId)); |
400 | 100 | } |
401 | | |
402 | | // Check that all values are in the cache and are all single touch. |
403 | 101 | for (int i = 0; i < kCapacity; i++) { |
404 | 100 | ASSERT_EQ(i + 1, Lookup(cache, i, kTestQueryId)); |
405 | 100 | ASSERT_FALSE(LookupAndCheckInMultiTouch(cache, i, i + 1)); |
406 | 100 | } |
407 | 1 | ASSERT_EQ(kCapacity, cache->GetUsage()); |
408 | | |
409 | | // Returning the flag back. |
410 | 1 | FLAGS_cache_single_touch_ratio = 0.2; |
411 | 1 | } |
412 | | |
413 | 1 | TEST_F(CacheTest, EvictionPolicyNoSingleTouch) { |
414 | 1 | FLAGS_cache_single_touch_ratio = 0; |
415 | 1 | const int kCapacity = 100; |
416 | 1 | auto cache = NewLRUCache(kCapacity , 0, true); |
417 | 1 | Status s; |
418 | 101 | for (int i = 0; i < kCapacity; i++) { |
419 | 100 | ASSERT_OK(Insert(cache, i, i + 1, kTestQueryId)); |
420 | 100 | } |
421 | | |
422 | | // Check that all values are in the cache and are all multi touch. |
423 | 101 | for (int i = 0; i < kCapacity; i++) { |
424 | 100 | ASSERT_EQ(i + 1, Lookup(cache, i, kTestQueryId)); |
425 | 100 | ASSERT_TRUE(LookupAndCheckInMultiTouch(cache, i, i + 1)); |
426 | 100 | } |
427 | 1 | ASSERT_EQ(kCapacity, cache->GetUsage()); |
428 | | |
429 | | // Returning the flag back. |
430 | 1 | FLAGS_cache_single_touch_ratio = 0.2; |
431 | 1 | } |
432 | | |
// A key becomes multi-touch only after it is accessed under more than one
// query id.
TEST_F(CacheTest, MultiTouch) {
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_FALSE(LookupAndCheckInMultiTouch(100, -1));

  // Use two different query ids.
  QueryId qid1 = 1000;
  QueryId qid2 = 1001;

  // First insert under qid1: the entry is still single-touch.
  ASSERT_OK(Insert(100, 101, 1, qid1));
  ASSERT_FALSE(LookupAndCheckInMultiTouch(100, 101, qid1));
  // Re-insert under a different query id: promoted to multi-touch.
  ASSERT_OK(Insert(100, 101, 1, qid2));
  ASSERT_TRUE(LookupAndCheckInMultiTouch(100, 101, qid2));
}
446 | | |
// A multi-touch entry (key 100) survives overwhelming single-touch insert
// pressure, while a single-touch entry (key 200) is evicted.
TEST_F(CacheTest, EvictionPolicyMultiTouch) {
  QueryId qid1 = 1000;
  QueryId qid2 = 1001;
  QueryId qid3 = 1002;
  // Insert key 100 under two different query ids to promote it to multi-touch.
  ASSERT_OK(Insert(100, 101, 1, qid1));
  ASSERT_OK(Insert(100, 101, 1, qid2));
  ASSERT_TRUE(LookupAndCheckInMultiTouch(100, 101, qid2));
  // Key 200 is only ever seen under one query id: stays single-touch.
  ASSERT_OK(Insert(200, 201, 1, qid3));
  ASSERT_FALSE(LookupAndCheckInMultiTouch(200, 201, qid3));

  // We are adding a single element (key: 100, value: 101) to multi touch cache.
  // Even if we overload the cache with single touch items, multi touch item should not be evicted.
  for (int i = 0; i < kCacheSize + 100; i++) {
    ASSERT_OK(Insert(1000 + i, 2000 + i));
    ASSERT_EQ(2000 + i, Lookup(1000 + i));
  }
  ASSERT_TRUE(LookupAndCheckInMultiTouch(100, 101, qid2));
  ASSERT_EQ(-1, Lookup(200));
  ASSERT_LT(kCacheSize * FLAGS_cache_single_touch_ratio, cache_->GetUsage());
}
467 | | |
// Mixes light (charge 1) and heavy (charge 10) entries worth ~2x capacity and
// checks that the combined charge still cached stays within ~110% of capacity.
TEST_F(CacheTest, HeavyEntries) {
  // Add a bunch of light and heavy entries and then count the combined
  // size of items still in the cache, which must be approximately the
  // same as the total capacity.
  const int kLight = 1;
  const int kHeavy = 10;
  int added = 0;
  int index = 0;
  while (added < 2*kCacheSize) {
    const int weight = (index & 1) ? kLight : kHeavy;  // alternate light/heavy
    ASSERT_OK(Insert(index, 1000 + index, weight));
    added += weight;
    index++;
  }

  int cached_weight = 0;
  for (int i = 0; i < index; i++) {
    const int weight = (i & 1 ? kLight : kHeavy);
    int r = Lookup(i);
    if (r >= 0) {
      cached_weight += weight;
      ASSERT_EQ(1000 + i, r);  // surviving entries must retain their values
    }
  }
  ASSERT_LE(cached_weight, kCacheSize + kCacheSize/10);
}
494 | | |
495 | 1 | TEST_F(CacheTest, NewId) { |
496 | 1 | uint64_t a = cache_->NewId(); |
497 | 1 | uint64_t b = cache_->NewId(); |
498 | 1 | ASSERT_NE(a, b); |
499 | 1 | } |
500 | | |
501 | | |
// Heap-allocated test value; announces its own destruction on stdout so that
// leaks or double-frees are visible in the test log.
class Value {
 public:
  explicit Value(size_t v) : v_(v) {}

  ~Value() { std::cout << v_ << " is destructed\n"; }

 private:
  size_t v_;
};
510 | | |
511 | | namespace { |
512 | 92 | void deleter(const Slice& key, void* value) { |
513 | 92 | delete static_cast<Value *>(value); |
514 | 92 | } |
515 | | } // namespace |
516 | | |
// SetCapacity can grow the cache freely; shrinking evicts unpinned entries
// down to the new capacity while pinned entries remain charged.
TEST_F(CacheTest, SetCapacity) {
  // test1: increase capacity
  // lets create a cache with capacity 10.
  std::shared_ptr<Cache> cache = NewLRUCache(10, 0);
  std::vector<Cache::Handle*> handles(16);

  // Insert 8 entries, but not releasing.
  for (size_t i = 0; i < 8; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, kTestQueryId, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(8U, cache->GetUsage());
  // Growing the capacity must not disturb existing entries.
  cache->SetCapacity(20);
  ASSERT_EQ(20U, cache->GetCapacity());
  ASSERT_EQ(8U, cache->GetUsage());

  // test2: decrease capacity
  // insert 8 more elements to cache, then release 8,
  // then decrease capacity to 15.
  // and usage should be 15 from all singles
  for (size_t i = 8; i < 16; i++) {
    std::string key = ToString(i+1);
    Status s = cache->Insert(key, kTestQueryId, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }

  ASSERT_EQ(20U, cache->GetCapacity());
  ASSERT_EQ(16U, cache->GetUsage());
  // Unpin the first 8 so they become evictable.
  for (size_t i = 0; i < 8; i++) {
    cache->Release(handles[i]);
  }
  ASSERT_EQ(20U, cache->GetCapacity());
  ASSERT_EQ(16U, cache->GetUsage());
  // Shrinking evicts just enough unpinned entries to reach the new capacity.
  cache->SetCapacity(15);
  ASSERT_EQ(15, cache->GetCapacity());
  ASSERT_EQ(15, cache->GetUsage());

  // release remaining 8 to keep valgrind happy
  for (size_t i = 8; i < 16; i++) {
    cache->Release(handles[i]);
  }
}
561 | | |
// With strict_capacity_limit enabled, inserts that cannot fit fail with
// Status::Incomplete instead of over-committing the cache.
TEST_F(CacheTest, SetStrictCapacityLimit) {
  std::shared_ptr<Cache> cache = NewLRUCache(10, 0, true);
  std::vector<Cache::Handle*> handles(2);
  Status s;

  // Insert two entries and keep them pinned.
  // NOTE(review): with capacity 10 the third insert below still fails —
  // presumably the relevant (single-touch) portion is already full of pinned
  // entries; confirm against the LRUCache strict-limit implementation.
  for (size_t i = 0; i < 2; i++) {
    std::string key = ToString(i + 1);
    s = cache->Insert(key, kTestQueryId, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
    ASSERT_NE(nullptr, handles[i]);
  }

  Cache::Handle* handle;
  std::string extra_key = "extra";
  Value* extra_value = new Value(0);

  // Insert with a handle pointer: failure must not populate the handle.
  s = cache->Insert(extra_key, kTestQueryId, extra_value, 1, &deleter, &handle);
  ASSERT_TRUE(s.IsIncomplete());
  ASSERT_EQ(nullptr, handle);
  // test insert without handle
  // NOTE(review): this second failed insert appears to free extra_value via
  // the deleter (otherwise it would leak) — TODO confirm against
  // LRUCache::Insert ownership semantics on failure.
  s = cache->Insert(extra_key, kTestQueryId, extra_value, 1, &deleter);
  ASSERT_TRUE(s.IsIncomplete());
  ASSERT_EQ(2, cache->GetUsage());

  for (size_t i = 0; i < 2; i++) {
    cache->Release(handles[i]);
  }
}
590 | | |
// A cache whose entries are all pinned can exceed its capacity; once handles
// are released it shrinks back by evicting in release (LRU) order.
TEST_F(CacheTest, OverCapacity) {
  size_t cache_size = 50;
  size_t single_touch_cache_size = cache_size;

  // a LRUCache with n entries and one shard only
  std::shared_ptr<Cache> cache = NewLRUCache(cache_size, 0);

  std::vector<Cache::Handle*> handles(cache_size + 1);

  // Insert cache_size + 1 entries, but not releasing.
  for (size_t i = 0; i < single_touch_cache_size + 1; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, kTestQueryId, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }

  // Guess what's in the cache now?
  // All entries must still be present: every one is pinned by a handle.
  for (size_t i = 0; i < single_touch_cache_size + 1; i++) {
    std::string key = ToString(i + 1);
    auto h = cache->Lookup(key, kTestQueryId);
    std::cout << key << (h?" found\n":" not found\n");
    ASSERT_TRUE(h != nullptr);
    if (h) cache->Release(h);
  }

  // the cache is over capacity since nothing could be evicted
  ASSERT_EQ(single_touch_cache_size + 1U, cache->GetUsage());
  for (size_t i = 0; i < single_touch_cache_size + 1; i++) {
    cache->Release(handles[i]);
  }

  // cache is under capacity now since elements were released
  ASSERT_EQ(single_touch_cache_size, cache->GetUsage());

  // element 0 is evicted and the rest is there
  // This is consistent with the LRU policy since the element 0
  // was released first
  for (size_t i = 0; i < single_touch_cache_size + 1; i++) {
    std::string key = ToString(i + 1);
    auto h = cache->Lookup(key, kTestQueryId);
    if (h) {
      ASSERT_NE(i, 0U);
      cache->Release(h);
    } else {
      ASSERT_EQ(i, 0U);
    }
  }
}
639 | | |
640 | | namespace { |
641 | | std::vector<std::pair<int, int>> callback_state; |
642 | 10 | void callback(void* entry, size_t charge) { |
643 | 10 | callback_state.push_back({DecodeValue(entry), static_cast<int>(charge)}); |
644 | 10 | } |
645 | | }; |
646 | | |
// ApplyToAllCacheEntries must visit every cached entry exactly once with its
// value and charge.
// NOTE(review): "Entires" is a typo for "Entries"; renaming would change the
// registered gtest name (which external tooling may filter on), so it is
// deliberately left as-is.
TEST_F(CacheTest, ApplyToAllCacheEntiresTest) {
  std::vector<std::pair<int, int>> inserted;
  callback_state.clear();  // global state shared with callback()

  for (int i = 0; i < 10; ++i) {
    ASSERT_OK(Insert(i, i * 2, i + 1));
    inserted.push_back({i * 2, i + 1});
  }
  cache_->ApplyToAllCacheEntries(callback, true);

  // Sort both sides so the comparison does not depend on visit order.
  sort(inserted.begin(), inserted.end());
  sort(callback_state.begin(), callback_state.end());
  ASSERT_TRUE(inserted == callback_state);
}
661 | | |
662 | 7 | void AssertCacheSizes(Cache *cache, size_t single_touch_count, size_t multi_touch_count) { |
663 | 7 | std::vector<std::pair<size_t, size_t>> usages = cache->TEST_GetIndividualUsages(); |
664 | 7 | ASSERT_EQ(usages.size(), 1); |
665 | 7 | ASSERT_EQ(usages[0].first, single_touch_count); |
666 | 7 | ASSERT_EQ(usages[0].second, multi_touch_count); |
667 | 7 | } |
668 | | |
669 | | CHECKED_STATUS InsertIntoCache(const std::shared_ptr<Cache>& cache, int key, int value, |
670 | | int query_id = CacheTest::kTestQueryId, int charge = 1, |
671 | 22 | Cache::Handle **handle = nullptr) { |
672 | 22 | return cache->Insert(ToString(key), query_id, new Value(value), charge, &deleter, handle); |
673 | 22 | } |
674 | | |
// Exercises single-touch / multi-touch pool accounting: new entries start in
// the single-touch pool, are promoted to multi-touch when looked up again
// under a different query id, and the two pools evict independently.
TEST_F(CacheTest, OverFlowTest) {
  FLAGS_cache_single_touch_ratio = 0.2;
  size_t cache_size = 10;
  std::shared_ptr<Cache> cache = NewLRUCache(cache_size, 0);

  // Insert single touch entries until the max.
  for (int i = 0; i <= 9; i++) {
    ASSERT_OK(InsertIntoCache(cache, i /* key */, i /* value */));
  }
  AssertCacheSizes(cache.get(), 10, 0);

  // Insert a new element and make sure that the size is still the same.
  {
    ASSERT_OK(InsertIntoCache(cache, 10 /* key */, 10 /* value */));

    // Make sure that entry '0' is not present.
    // (Entry 0 was the least recently used, so it got evicted.)
    Cache::Handle* h = cache->Lookup(ToString(0), 100 /* query_id */);
    ASSERT_EQ(h, nullptr);
  }
  AssertCacheSizes(cache.get(), 10, 0);

  // Push the min number of elements to multi-touch cache.
  // A lookup under a different query id (100) promotes an entry.
  for (int i = 1; i <= 2; ++i) {
    Cache::Handle* h = cache->Lookup(ToString(i), 100 /* query_id */);
    cache->Release(h);
  }
  AssertCacheSizes(cache.get(), 8, 2);

  // Perform Lookups for elements in the single-touch cache to move them to multi-touch cache.
  for (int i = 3; i <= 8; ++i) {
    Cache::Handle* h = cache->Lookup(ToString(i), 100 /* query_id */);
    cache->Release(h);
  }
  AssertCacheSizes(cache.get(), 2, 8);

  // Perform more lookup and make sure that the size of multi-touch doesn't grow.
  // Each promotion into the full multi-touch pool evicts its LRU entry.
  {
    Cache::Handle* h = cache->Lookup(ToString(9), 100 /* query_id */);
    cache->Release(h);
    AssertCacheSizes(cache.get(), 1, 8);
  }
  {
    Cache::Handle* h = cache->Lookup(ToString(10), 100 /* query_id */);
    cache->Release(h);
    AssertCacheSizes(cache.get(), 0, 8);
  }

  // Perform 2 more insertions to make sure we can insert again.
  for (int i = 11; i <= 12; ++i) {
    ASSERT_OK(InsertIntoCache(cache, i /* key */, i /* value */));
  }
  AssertCacheSizes(cache.get(), 2, 8);

  // Make sure that direct insertion into the multi-touch cache works properly.
  FLAGS_cache_single_touch_ratio = 0.4;
  cache_size = 5;
  cache = NewLRUCache(cache_size, 0);

  for (int i = 1; i <= 5; ++i) {
    ASSERT_OK(InsertIntoCache(cache, i /* key */, i /* value */));
  }
  Cache::Handle *h;
  ASSERT_OK(InsertIntoCache(cache, 9 /* key */, 9 /* value */, CacheTest::kTestQueryId, 1, &h));

  // Insert elements directly into the multi-touch pool
  // (query_id -1 bypasses the single-touch pool, per the call below).
  for (int i = 6; i <= 8; ++i) {
    ASSERT_OK(InsertIntoCache(cache, i /* key */, i /* value */, -1 /* query_id */));
  }
  cache->Release(h);
}
745 | | |
746 | | } // namespace rocksdb |
747 | | |
// Standard gtest entry point: register and run every TEST_F in this file.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}