/Users/deen/code/yugabyte-db/src/yb/rocksdb/db/skiplist.h
Line | Count | Source (jump to first uncovered line) |
1 | | // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. |
2 | | // This source code is licensed under the BSD-style license found in the |
3 | | // LICENSE file in the root directory of this source tree. An additional grant |
4 | | // of patent rights can be found in the PATENTS file in the same directory. |
5 | | // |
6 | | // The following only applies to changes made to this file as part of YugaByte development. |
7 | | // |
8 | | // Portions Copyright (c) YugaByte, Inc. |
9 | | // |
10 | | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except |
11 | | // in compliance with the License. You may obtain a copy of the License at |
12 | | // |
13 | | // http://www.apache.org/licenses/LICENSE-2.0 |
14 | | // |
15 | | // Unless required by applicable law or agreed to in writing, software distributed under the License |
16 | | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express |
17 | | // or implied. See the License for the specific language governing permissions and limitations |
18 | | // under the License. |
19 | | // |
20 | | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. |
21 | | // Use of this source code is governed by a BSD-style license that can be |
22 | | // found in the LICENSE file. See the AUTHORS file for names of contributors. |
23 | | |
24 | | // Thread safety |
25 | | // ------------- |
26 | | // |
27 | | // Writes require external synchronization, most likely a mutex. |
28 | | // Reads require a guarantee that the SkipList will not be destroyed |
29 | | // while the read is in progress. Apart from that, reads progress |
30 | | // without any internal locking or synchronization. |
31 | | // |
32 | | // Invariants: |
33 | | // |
34 | | // (1) Allocated nodes are never deleted until the SkipList is |
35 | | // destroyed. This is trivially guaranteed by the code since we |
36 | | // never delete any skip list nodes. |
37 | | // |
38 | | // (2) The contents of a Node except for the next/prev pointers are |
39 | | // immutable after the Node has been linked into the SkipList. |
40 | | // Only Insert() modifies the list, and it is careful to initialize |
41 | | // a node and use release-stores to publish the nodes in one or |
42 | | // more lists. |
43 | | // |
44 | | // ... prev vs. next pointer ordering ... |
45 | | // |
46 | | |
47 | | #ifndef YB_ROCKSDB_DB_SKIPLIST_H |
48 | | #define YB_ROCKSDB_DB_SKIPLIST_H |
49 | | |
50 | | #pragma once |
51 | | |
52 | | #include <atomic> |
53 | | |
54 | | #include <glog/logging.h> |
55 | | |
56 | | #include "yb/rocksdb/util/allocator.h" |
57 | | #include "yb/rocksdb/util/random.h" |
58 | | |
59 | | namespace rocksdb { |
60 | | |
61 | | // Base skip list implementation shares common logic for SkipList and SingleWriterInlineSkipList. |
62 | | // Since concurrent writes are not allowed many operations could be simplified. |
63 | | // See InlineSkipList for an implementation that supports concurrent writes. |
64 | | template<class Key, class Comparator, class NodeType> |
65 | | class SkipListBase { |
66 | | private: |
67 | | typedef NodeType Node; |
68 | | |
69 | | public: |
70 | | // Create a new SkipList object that will use "cmp" for comparing keys, |
71 | | // and will allocate memory using "*allocator". Objects allocated in the |
72 | | // allocator must remain allocated for the lifetime of the skiplist object. |
73 | | explicit SkipListBase(Comparator cmp, Allocator* allocator, |
74 | | int32_t max_height = 12, int32_t branching_factor = 4); |
75 | | |
76 | | // No copying allowed |
77 | | SkipListBase(const SkipListBase&) = delete; |
78 | | void operator=(const SkipListBase&) = delete; |
79 | | |
80 | | // Returns true iff an entry that compares equal to key is in the list. |
81 | | bool Contains(Key key) const; |
82 | | |
83 | | bool Erase(Key key, Comparator cmp); |
84 | | |
85 | | // Return estimated number of entries smaller than `key`. |
86 | | uint64_t EstimateCount(Key key) const; |
87 | | |
88 | | // Iteration over the contents of a skip list |
89 | | class Iterator { |
90 | | public: |
91 | | // Initialize an iterator over the specified list. |
92 | | // The returned iterator is not valid. |
93 | | explicit Iterator(const SkipListBase* list); |
94 | | |
95 | | // Change the underlying skiplist used for this iterator |
96 | | // This enables us not changing the iterator without deallocating |
97 | | // an old one and then allocating a new one |
98 | | void SetList(const SkipListBase* list); |
99 | | |
100 | | // Returns true iff the iterator is positioned at a valid node. |
101 | | bool Valid() const; |
102 | | |
103 | | // Returns the key at the current position. |
104 | | // REQUIRES: Valid() |
105 | | Key key() const; |
106 | | |
107 | | // Advances to the next position. |
108 | | // REQUIRES: Valid() |
109 | | void Next(); |
110 | | |
111 | | // Advances to the previous position. |
112 | | // REQUIRES: Valid() |
113 | | void Prev(); |
114 | | |
115 | | // Advance to the first entry with a key >= target |
116 | | void Seek(Key target); |
117 | | |
118 | | // Position at the first entry in list. |
119 | | // Final state of iterator is Valid() iff list is not empty. |
120 | | void SeekToFirst(); |
121 | | |
122 | | // Position at the last entry in list. |
123 | | // Final state of iterator is Valid() iff list is not empty. |
124 | | void SeekToLast(); |
125 | | |
126 | | private: |
127 | | const SkipListBase* list_; |
128 | | Node* node_; |
129 | | // Intentionally copyable |
130 | | }; |
131 | | |
132 | | protected: |
133 | | // We do insert in 3 phases: |
134 | | // 1) Prepare key insertion |
135 | | // 2) Allocate and initialize node |
136 | | // 3) Complete newly allocated node |
137 | | // (1) and (3) are generic phases, while (2) is implementation specific. |
138 | | // REQUIRES: nothing that compares equal to key is currently in the list. |
139 | | void PrepareInsert(Key key); |
140 | | void CompleteInsert(NodeType* node, int height); |
141 | | |
142 | | int RandomHeight(); |
143 | | |
144 | | Allocator* const allocator_; // Allocator used for allocations of nodes |
145 | | |
146 | | private: |
147 | | const uint16_t kMaxHeight_; |
148 | | const uint16_t kBranching_; |
149 | | const uint32_t kScaledInverseBranching_; |
150 | | |
151 | | // Immutable after construction |
152 | | Comparator const compare_; |
153 | | |
154 | | Node* const head_; |
155 | | |
156 | | // Modified only by Insert(). Read racily by readers, but stale |
157 | | // values are ok. |
158 | | std::atomic<int> max_height_; // Height of the entire list |
159 | | |
160 | | // Used for optimizing sequential insert patterns. Tricky. prev_[i] for |
161 | | // i up to max_height_ is the predecessor of prev_[0] and prev_height_ |
162 | | // is the height of prev_[0]. prev_[0] can only be equal to head before |
163 | | // insertion, in which case max_height_ and prev_height_ are 1. |
164 | | Node** prev_; |
165 | | int32_t prev_height_; |
166 | | |
167 | | // Whether prev_ is valid, prev_ is invalidated during erase. |
168 | | bool prev_valid_ = true; |
169 | | |
170 | 745M | int GetMaxHeight() const { |
171 | 745M | return max_height_.load(std::memory_order_relaxed); |
172 | 745M | } rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::GetMaxHeight() const Line | Count | Source | 170 | 3.02M | int GetMaxHeight() const { | 171 | 3.02M | return max_height_.load(std::memory_order_relaxed); | 172 | 3.02M | } |
rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::GetMaxHeight() const Line | Count | Source | 170 | 732M | int GetMaxHeight() const { | 171 | 732M | return max_height_.load(std::memory_order_relaxed); | 172 | 732M | } |
rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> >::GetMaxHeight() const Line | Count | Source | 170 | 9.95M | int GetMaxHeight() const { | 171 | 9.95M | return max_height_.load(std::memory_order_relaxed); | 172 | 9.95M | } |
|
173 | | |
174 | 243M | bool Equal(Key a, Key b) const { return (compare_(a, b) == 0); } rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::Equal(char const* const&, char const* const&) const Line | Count | Source | 174 | 1.12M | bool Equal(Key a, Key b) const { return (compare_(a, b) == 0); } |
rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::Equal(char const*, char const*) const Line | Count | Source | 174 | 241M | bool Equal(Key a, Key b) const { return (compare_(a, b) == 0); } |
rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> >::Equal(rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchIndexEntry* const&) const Line | Count | Source | 174 | 281 | bool Equal(Key a, Key b) const { return (compare_(a, b) == 0); } |
|
175 | | |
176 | | // Return true if key is greater than the data stored in "n" |
177 | | bool KeyIsAfterNode(Key key, Node* n) const; |
178 | | |
179 | | // Returns the earliest node with a key >= key. |
180 | | // Return nullptr if there is no such node. |
181 | | Node* FindGreaterOrEqual(Key key) const; |
182 | | |
183 | | // Return the latest node with a key < key. |
184 | | // Return head_ if there is no such node. |
185 | | // Fills prev[level] with pointer to previous node at "level" for every |
186 | | // level in [0..max_height_-1], if prev is non-null. |
187 | | Node* FindLessThan(Key key, Node** prev = nullptr) const; |
188 | | |
189 | | // Return the last node in the list. |
190 | | // Return head_ if list is empty. |
191 | | Node* FindLast() const; |
192 | | }; |
193 | | |
194 | | // Implementation details follow |
195 | | template<typename Key> |
196 | | struct SkipListNode { |
197 | 9.14M | explicit SkipListNode(const Key& k) : key(k) { } rocksdb::SkipListNode<char const*>::SkipListNode(char const* const&) Line | Count | Source | 197 | 1.13M | explicit SkipListNode(const Key& k) : key(k) { } |
rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*>::SkipListNode(rocksdb::WriteBatchIndexEntry* const&) Line | Count | Source | 197 | 8.00M | explicit SkipListNode(const Key& k) : key(k) { } |
|
198 | | |
199 | | Key const key; |
200 | | |
201 | | // Accessors/mutators for links. Wrapped in methods so we can |
202 | | // add the appropriate barriers as necessary. |
203 | 77.5M | SkipListNode* Next(int n) { |
204 | 77.5M | DCHECK_GE(n, 0); |
205 | | // Use an 'acquire load' so that we observe a fully initialized |
206 | | // version of the returned Node. |
207 | 77.5M | return next_[n].load(std::memory_order_acquire); |
208 | 77.5M | } rocksdb::SkipListNode<char const*>::Next(int) Line | Count | Source | 203 | 59.1M | SkipListNode* Next(int n) { | 204 | 59.1M | DCHECK_GE(n, 0); | 205 | | // Use an 'acquire load' so that we observe a fully initialized | 206 | | // version of the returned Node. | 207 | 59.1M | return next_[n].load(std::memory_order_acquire); | 208 | 59.1M | } |
rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*>::Next(int) Line | Count | Source | 203 | 18.3M | SkipListNode* Next(int n) { | 204 | 18.3M | DCHECK_GE(n, 0); | 205 | | // Use an 'acquire load' so that we observe a fully initialized | 206 | | // version of the returned Node. | 207 | 18.3M | return next_[n].load(std::memory_order_acquire); | 208 | 18.3M | } |
|
209 | | |
210 | 20.1M | void SetNext(int n, SkipListNode* x) { |
211 | 20.1M | DCHECK_GE(n, 0); |
212 | | // Use a 'release store' so that anybody who reads through this |
213 | | // pointer observes a fully initialized version of the inserted node. |
214 | 20.1M | next_[n].store(x, std::memory_order_release); |
215 | 20.1M | } rocksdb::SkipListNode<char const*>::SetNext(int, rocksdb::SkipListNode<char const*>*) Line | Count | Source | 210 | 1.69M | void SetNext(int n, SkipListNode* x) { | 211 | 1.69M | DCHECK_GE(n, 0); | 212 | | // Use a 'release store' so that anybody who reads through this | 213 | | // pointer observes a fully initialized version of the inserted node. | 214 | 1.69M | next_[n].store(x, std::memory_order_release); | 215 | 1.69M | } |
rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*>::SetNext(int, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*>*) Line | Count | Source | 210 | 18.4M | void SetNext(int n, SkipListNode* x) { | 211 | 18.4M | DCHECK_GE(n, 0); | 212 | | // Use a 'release store' so that anybody who reads through this | 213 | | // pointer observes a fully initialized version of the inserted node. | 214 | 18.4M | next_[n].store(x, std::memory_order_release); | 215 | 18.4M | } |
|
216 | | |
217 | | // No-barrier variants that can be safely used in a few locations. |
218 | 19.4M | SkipListNode* NoBarrier_Next(int n) { |
219 | 19.4M | DCHECK_GE(n, 0); |
220 | 19.4M | return next_[n].load(std::memory_order_relaxed); |
221 | 19.4M | } rocksdb::SkipListNode<char const*>::NoBarrier_Next(int) Line | Count | Source | 218 | 2.50M | SkipListNode* NoBarrier_Next(int n) { | 219 | 2.50M | DCHECK_GE(n, 0); | 220 | 2.50M | return next_[n].load(std::memory_order_relaxed); | 221 | 2.50M | } |
rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*>::NoBarrier_Next(int) Line | Count | Source | 218 | 16.9M | SkipListNode* NoBarrier_Next(int n) { | 219 | 16.9M | DCHECK_GE(n, 0); | 220 | 16.9M | return next_[n].load(std::memory_order_relaxed); | 221 | 16.9M | } |
|
222 | | |
223 | 11.1M | void NoBarrier_SetNext(int n, SkipListNode* x) { |
224 | 11.1M | DCHECK_GE(n, 0); |
225 | 11.1M | next_[n].store(x, std::memory_order_relaxed); |
226 | 11.1M | } rocksdb::SkipListNode<char const*>::NoBarrier_SetNext(int, rocksdb::SkipListNode<char const*>*) Line | Count | Source | 223 | 1.42M | void NoBarrier_SetNext(int n, SkipListNode* x) { | 224 | 1.42M | DCHECK_GE(n, 0); | 225 | 1.42M | next_[n].store(x, std::memory_order_relaxed); | 226 | 1.42M | } |
rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*>::NoBarrier_SetNext(int, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*>*) Line | Count | Source | 223 | 9.69M | void NoBarrier_SetNext(int n, SkipListNode* x) { | 224 | 9.69M | DCHECK_GE(n, 0); | 225 | 9.69M | next_[n].store(x, std::memory_order_relaxed); | 226 | 9.69M | } |
|
227 | | |
228 | 9.14M | static SkipListNode<Key>* Create(Allocator* allocator, const Key& key, int height) { |
229 | 9.14M | char* mem = allocator->AllocateAligned( |
230 | 9.14M | sizeof(SkipListNode<Key>) + sizeof(std::atomic<SkipListNode<Key>*>) * (height - 1)); |
231 | 9.14M | return new (mem) SkipListNode<Key>(key); |
232 | 9.14M | } rocksdb::SkipListNode<char const*>::Create(rocksdb::Allocator*, char const* const&, int) Line | Count | Source | 228 | 1.13M | static SkipListNode<Key>* Create(Allocator* allocator, const Key& key, int height) { | 229 | 1.13M | char* mem = allocator->AllocateAligned( | 230 | 1.13M | sizeof(SkipListNode<Key>) + sizeof(std::atomic<SkipListNode<Key>*>) * (height - 1)); | 231 | 1.13M | return new (mem) SkipListNode<Key>(key); | 232 | 1.13M | } |
rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*>::Create(rocksdb::Allocator*, rocksdb::WriteBatchIndexEntry* const&, int) Line | Count | Source | 228 | 8.01M | static SkipListNode<Key>* Create(Allocator* allocator, const Key& key, int height) { | 229 | 8.01M | char* mem = allocator->AllocateAligned( | 230 | 8.01M | sizeof(SkipListNode<Key>) + sizeof(std::atomic<SkipListNode<Key>*>) * (height - 1)); | 231 | 8.01M | return new (mem) SkipListNode<Key>(key); | 232 | 8.01M | } |
|
233 | | |
234 | 800k | static SkipListNode<Key>* CreateHead(Allocator* allocator, int height) { |
235 | 800k | return Create(allocator, Key() /* any key will do */, height); |
236 | 800k | } rocksdb::SkipListNode<char const*>::CreateHead(rocksdb::Allocator*, int) Line | Count | Source | 234 | 64.7k | static SkipListNode<Key>* CreateHead(Allocator* allocator, int height) { | 235 | 64.7k | return Create(allocator, Key() /* any key will do */, height); | 236 | 64.7k | } |
rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*>::CreateHead(rocksdb::Allocator*, int) Line | Count | Source | 234 | 735k | static SkipListNode<Key>* CreateHead(Allocator* allocator, int height) { | 235 | 735k | return Create(allocator, Key() /* any key will do */, height); | 236 | 735k | } |
|
237 | | |
238 | | private: |
239 | | // Array of length equal to the node height. next_[0] is lowest level link. |
240 | | std::atomic<SkipListNode*> next_[1]; |
241 | | }; |
242 | | |
243 | | // Generic skip list implementation - allows any key comparable with specified comparator. |
244 | | // Please note thread safety at top of this file. |
245 | | template<typename Key, class Comparator> |
246 | | class SkipList : public SkipListBase<const Key&, Comparator, SkipListNode<Key>> { |
247 | | private: |
248 | | typedef SkipListNode<Key> Node; |
249 | | typedef SkipListBase<const Key&, Comparator, SkipListNode<Key>> Base; |
250 | | |
251 | | public: |
252 | | template<class... Args> |
253 | | explicit SkipList(Args&&... args) |
254 | 801k | : Base(std::forward<Args>(args)...) { |
255 | 801k | } rocksdb::SkipList<char const*, rocksdb::MemTableRep::KeyComparator const&>::SkipList<rocksdb::MemTableRep::KeyComparator const&, rocksdb::MemTableAllocator*&>(rocksdb::MemTableRep::KeyComparator const&, rocksdb::MemTableAllocator*&) Line | Count | Source | 254 | 52 | : Base(std::forward<Args>(args)...) { | 255 | 52 | } |
rocksdb::SkipList<char const*, rocksdb::MemTableRep::KeyComparator const&>::SkipList<rocksdb::MemTableRep::KeyComparator const&, rocksdb::Arena*&>(rocksdb::MemTableRep::KeyComparator const&, rocksdb::Arena*&) Line | Count | Source | 254 | 1.30k | : Base(std::forward<Args>(args)...) { | 255 | 1.30k | } |
rocksdb::SkipList<char const*, rocksdb::MemTableRep::KeyComparator const&>::SkipList<rocksdb::MemTableRep::KeyComparator const&, rocksdb::MemTableAllocator* const&, int const&, int const&>(rocksdb::MemTableRep::KeyComparator const&, rocksdb::MemTableAllocator* const&, int const&, int const&) Line | Count | Source | 254 | 63.3k | : Base(std::forward<Args>(args)...) { | 255 | 63.3k | } |
rocksdb::SkipList<rocksdb::WriteBatchIndexEntry*, rocksdb::WriteBatchEntryComparator const&>::SkipList<rocksdb::WriteBatchEntryComparator&, rocksdb::Arena*>(rocksdb::WriteBatchEntryComparator&, rocksdb::Arena*&&) Line | Count | Source | 254 | 736k | : Base(std::forward<Args>(args)...) { | 255 | 736k | } |
|
256 | | |
257 | 8.35M | void Insert(const Key& key) { |
258 | 8.35M | Base::PrepareInsert(key); |
259 | 8.35M | int height = Base::RandomHeight(); |
260 | 8.35M | Node* x = Node::Create(Base::allocator_, key, height); |
261 | 8.35M | Base::CompleteInsert(x, height); |
262 | 8.35M | } rocksdb::SkipList<char const*, rocksdb::MemTableRep::KeyComparator const&>::Insert(char const* const&) Line | Count | Source | 257 | 1.07M | void Insert(const Key& key) { | 258 | 1.07M | Base::PrepareInsert(key); | 259 | 1.07M | int height = Base::RandomHeight(); | 260 | 1.07M | Node* x = Node::Create(Base::allocator_, key, height); | 261 | 1.07M | Base::CompleteInsert(x, height); | 262 | 1.07M | } |
rocksdb::SkipList<rocksdb::WriteBatchIndexEntry*, rocksdb::WriteBatchEntryComparator const&>::Insert(rocksdb::WriteBatchIndexEntry* const&) Line | Count | Source | 257 | 7.28M | void Insert(const Key& key) { | 258 | 7.28M | Base::PrepareInsert(key); | 259 | 7.28M | int height = Base::RandomHeight(); | 260 | 7.28M | Node* x = Node::Create(Base::allocator_, key, height); | 261 | 7.28M | Base::CompleteInsert(x, height); | 262 | 7.28M | } |
|
263 | | |
264 | | private: |
265 | | Node* NewNode(const Key& key, int height); |
266 | | }; |
267 | | |
268 | | template<typename Key, class Comparator> |
269 | | typename SkipList<Key, Comparator>::Node* |
270 | | SkipList<Key, Comparator>::NewNode(const Key& key, int height) { |
271 | | return Node::Create(Base::allocator_, key, height); |
272 | | } |
273 | | |
274 | | template<class Key, class Comparator, class NodeType> |
275 | | SkipListBase<Key, Comparator, NodeType>::Iterator::Iterator( |
276 | 39.5M | const SkipListBase* list) { |
277 | 39.5M | SetList(list); |
278 | 39.5M | } rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::Iterator::Iterator(rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> > const*) Line | Count | Source | 276 | 607k | const SkipListBase* list) { | 277 | 607k | SetList(list); | 278 | 607k | } |
rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::Iterator::Iterator(rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode> const*) Line | Count | Source | 276 | 38.4M | const SkipListBase* list) { | 277 | 38.4M | SetList(list); | 278 | 38.4M | } |
rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> >::Iterator::Iterator(rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> > const*) Line | Count | Source | 276 | 501k | const SkipListBase* list) { | 277 | 501k | SetList(list); | 278 | 501k | } |
|
279 | | |
280 | | template<class Key, class Comparator, class NodeType> |
281 | 39.6M | void SkipListBase<Key, Comparator, NodeType>::Iterator::SetList(const SkipListBase* list) { |
282 | 39.6M | list_ = list; |
283 | 39.6M | node_ = nullptr; |
284 | 39.6M | } rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::Iterator::SetList(rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> > const*) Line | Count | Source | 281 | 708k | void SkipListBase<Key, Comparator, NodeType>::Iterator::SetList(const SkipListBase* list) { | 282 | 708k | list_ = list; | 283 | 708k | node_ = nullptr; | 284 | 708k | } |
rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::Iterator::SetList(rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode> const*) Line | Count | Source | 281 | 38.4M | void SkipListBase<Key, Comparator, NodeType>::Iterator::SetList(const SkipListBase* list) { | 282 | 38.4M | list_ = list; | 283 | 38.4M | node_ = nullptr; | 284 | 38.4M | } |
rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> >::Iterator::SetList(rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> > const*) Line | Count | Source | 281 | 501k | void SkipListBase<Key, Comparator, NodeType>::Iterator::SetList(const SkipListBase* list) { | 282 | 501k | list_ = list; | 283 | 501k | node_ = nullptr; | 284 | 501k | } |
|
285 | | |
286 | | template<class Key, class Comparator, class NodeType> |
287 | 2.95G | bool SkipListBase<Key, Comparator, NodeType>::Iterator::Valid() const { |
288 | 2.95G | return node_ != nullptr; |
289 | 2.95G | } rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::Iterator::Valid() const Line | Count | Source | 287 | 5.31M | bool SkipListBase<Key, Comparator, NodeType>::Iterator::Valid() const { | 288 | 5.31M | return node_ != nullptr; | 289 | 5.31M | } |
rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::Iterator::Valid() const Line | Count | Source | 287 | 2.93G | bool SkipListBase<Key, Comparator, NodeType>::Iterator::Valid() const { | 288 | 2.93G | return node_ != nullptr; | 289 | 2.93G | } |
rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> >::Iterator::Valid() const Line | Count | Source | 287 | 10.1M | bool SkipListBase<Key, Comparator, NodeType>::Iterator::Valid() const { | 288 | 10.1M | return node_ != nullptr; | 289 | 10.1M | } |
|
290 | | |
291 | | template<class Key, class Comparator, class NodeType> |
292 | 1.88G | Key SkipListBase<Key, Comparator, NodeType>::Iterator::key() const { |
293 | 1.88G | DCHECK(Valid()); |
294 | 1.88G | return node_->key; |
295 | 1.88G | } rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::Iterator::key() const Line | Count | Source | 292 | 1.77M | Key SkipListBase<Key, Comparator, NodeType>::Iterator::key() const { | 293 | 1.77M | DCHECK(Valid()); | 294 | 1.77M | return node_->key; | 295 | 1.77M | } |
rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::Iterator::key() const Line | Count | Source | 292 | 1.87G | Key SkipListBase<Key, Comparator, NodeType>::Iterator::key() const { | 293 | 1.87G | DCHECK(Valid()); | 294 | 1.87G | return node_->key; | 295 | 1.87G | } |
rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> >::Iterator::key() const Line | Count | Source | 292 | 5.74M | Key SkipListBase<Key, Comparator, NodeType>::Iterator::key() const { | 293 | 5.74M | DCHECK(Valid()); | 294 | 5.74M | return node_->key; | 295 | 5.74M | } |
|
296 | | |
297 | | template<class Key, class Comparator, class NodeType> |
298 | 441M | void SkipListBase<Key, Comparator, NodeType>::Iterator::Next() { |
299 | 441M | DCHECK(Valid()); |
300 | 441M | node_ = node_->Next(0); |
301 | 441M | } rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::Iterator::Next() Line | Count | Source | 298 | 824k | void SkipListBase<Key, Comparator, NodeType>::Iterator::Next() { | 299 | 824k | DCHECK(Valid()); | 300 | 824k | node_ = node_->Next(0); | 301 | 824k | } |
rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::Iterator::Next() Line | Count | Source | 298 | 439M | void SkipListBase<Key, Comparator, NodeType>::Iterator::Next() { | 299 | 439M | DCHECK(Valid()); | 300 | 439M | node_ = node_->Next(0); | 301 | 439M | } |
rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> >::Iterator::Next() Line | Count | Source | 298 | 500k | void SkipListBase<Key, Comparator, NodeType>::Iterator::Next() { | 299 | 500k | DCHECK(Valid()); | 300 | 500k | node_ = node_->Next(0); | 301 | 500k | } |
|
302 | | |
303 | | template<class Key, class Comparator, class NodeType> |
304 | 1.57M | void SkipListBase<Key, Comparator, NodeType>::Iterator::Prev() { |
305 | | // Instead of using explicit "prev" links, we just search for the |
306 | | // last node that falls before key. |
307 | 1.57M | DCHECK(Valid()); |
308 | 1.57M | node_ = list_->FindLessThan(node_->key); |
309 | 1.57M | if (node_ == list_->head_) { |
310 | 326k | node_ = nullptr; |
311 | 326k | } |
312 | 1.57M | } rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::Iterator::Prev() Line | Count | Source | 304 | 7 | void SkipListBase<Key, Comparator, NodeType>::Iterator::Prev() { | 305 | | // Instead of using explicit "prev" links, we just search for the | 306 | | // last node that falls before key. | 307 | 7 | DCHECK(Valid()); | 308 | 7 | node_ = list_->FindLessThan(node_->key); | 309 | 7 | if (node_ == list_->head_) { | 310 | 1 | node_ = nullptr; | 311 | 1 | } | 312 | 7 | } |
rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::Iterator::Prev() Line | Count | Source | 304 | 1.27M | void SkipListBase<Key, Comparator, NodeType>::Iterator::Prev() { | 305 | | // Instead of using explicit "prev" links, we just search for the | 306 | | // last node that falls before key. | 307 | 1.27M | DCHECK(Valid()); | 308 | 1.27M | node_ = list_->FindLessThan(node_->key); | 309 | 1.27M | if (node_ == list_->head_) { | 310 | 322k | node_ = nullptr; | 311 | 322k | } | 312 | 1.27M | } |
rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> >::Iterator::Prev() Line | Count | Source | 304 | 304k | void SkipListBase<Key, Comparator, NodeType>::Iterator::Prev() { | 305 | | // Instead of using explicit "prev" links, we just search for the | 306 | | // last node that falls before key. | 307 | 304k | DCHECK(Valid()); | 308 | 304k | node_ = list_->FindLessThan(node_->key); | 309 | 304k | if (node_ == list_->head_) { | 310 | 3.78k | node_ = nullptr; | 311 | 3.78k | } | 312 | 304k | } |
|
313 | | |
314 | | template<class Key, class Comparator, class NodeType> |
315 | 185M | void SkipListBase<Key, Comparator, NodeType>::Iterator::Seek(Key target) { |
316 | 185M | node_ = list_->FindGreaterOrEqual(target); |
317 | 185M | } rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::Iterator::Seek(char const* const&) Line | Count | Source | 315 | 705k | void SkipListBase<Key, Comparator, NodeType>::Iterator::Seek(Key target) { | 316 | 705k | node_ = list_->FindGreaterOrEqual(target); | 317 | 705k | } |
rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::Iterator::Seek(char const*) Line | Count | Source | 315 | 184M | void SkipListBase<Key, Comparator, NodeType>::Iterator::Seek(Key target) { | 316 | 184M | node_ = list_->FindGreaterOrEqual(target); | 317 | 184M | } |
rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> >::Iterator::Seek(rocksdb::WriteBatchIndexEntry* const&) Line | Count | Source | 315 | 756k | void SkipListBase<Key, Comparator, NodeType>::Iterator::Seek(Key target) { | 316 | 756k | node_ = list_->FindGreaterOrEqual(target); | 317 | 756k | } |
|
318 | | |
319 | | template<class Key, class Comparator, class NodeType> |
320 | 113k | void SkipListBase<Key, Comparator, NodeType>::Iterator::SeekToFirst() { |
321 | 113k | node_ = list_->head_->Next(0); |
322 | 113k | } rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::Iterator::SeekToFirst() Line | Count | Source | 320 | 1.37k | void SkipListBase<Key, Comparator, NodeType>::Iterator::SeekToFirst() { | 321 | 1.37k | node_ = list_->head_->Next(0); | 322 | 1.37k | } |
rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::Iterator::SeekToFirst() Line | Count | Source | 320 | 111k | void SkipListBase<Key, Comparator, NodeType>::Iterator::SeekToFirst() { | 321 | 111k | node_ = list_->head_->Next(0); | 322 | 111k | } |
|
323 | | |
324 | | template<class Key, class Comparator, class NodeType> |
325 | 1.26M | void SkipListBase<Key, Comparator, NodeType>::Iterator::SeekToLast() { |
326 | 1.26M | node_ = list_->FindLast(); |
327 | 1.26M | if (node_ == list_->head_) { |
328 | 422k | node_ = nullptr; |
329 | 422k | } |
330 | 1.26M | } Unexecuted instantiation: rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::Iterator::SeekToLast() rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::Iterator::SeekToLast() Line | Count | Source | 325 | 1.25M | void SkipListBase<Key, Comparator, NodeType>::Iterator::SeekToLast() { | 326 | 1.25M | node_ = list_->FindLast(); | 327 | 1.25M | if (node_ == list_->head_) { | 328 | 422k | node_ = nullptr; | 329 | 422k | } | 330 | 1.25M | } |
rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> >::Iterator::SeekToLast() Line | Count | Source | 325 | 1.41k | void SkipListBase<Key, Comparator, NodeType>::Iterator::SeekToLast() { | 326 | 1.41k | node_ = list_->FindLast(); | 327 | 1.41k | if (node_ == list_->head_) { | 328 | 88 | node_ = nullptr; | 329 | 88 | } | 330 | 1.41k | } |
|
331 | | |
332 | | template<class Key, class Comparator, class NodeType> |
333 | 371M | int SkipListBase<Key, Comparator, NodeType>::RandomHeight() { |
334 | 371M | auto rnd = Random::GetTLSInstance(); |
335 | | |
336 | | // Increase height with probability 1 in kBranching |
337 | 371M | int height = 1; |
338 | 494M | while (height < kMaxHeight_ && rnd->Next() < kScaledInverseBranching_494M ) { |
339 | 123M | height++; |
340 | 123M | } |
341 | 371M | return height; |
342 | 371M | } rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::RandomHeight() Line | Count | Source | 333 | 1.07M | int SkipListBase<Key, Comparator, NodeType>::RandomHeight() { | 334 | 1.07M | auto rnd = Random::GetTLSInstance(); | 335 | | | 336 | | // Increase height with probability 1 in kBranching | 337 | 1.07M | int height = 1; | 338 | 1.43M | while (height < kMaxHeight_ && rnd->Next() < kScaledInverseBranching_1.42M ) { | 339 | 356k | height++; | 340 | 356k | } | 341 | 1.07M | return height; | 342 | 1.07M | } |
rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::RandomHeight() Line | Count | Source | 333 | 362M | int SkipListBase<Key, Comparator, NodeType>::RandomHeight() { | 334 | 362M | auto rnd = Random::GetTLSInstance(); | 335 | | | 336 | | // Increase height with probability 1 in kBranching | 337 | 362M | int height = 1; | 338 | 483M | while (height < kMaxHeight_ && rnd->Next() < kScaledInverseBranching_483M ) { | 339 | 120M | height++; | 340 | 120M | } | 341 | 362M | return height; | 342 | 362M | } |
rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> >::RandomHeight() Line | Count | Source | 333 | 7.28M | int SkipListBase<Key, Comparator, NodeType>::RandomHeight() { | 334 | 7.28M | auto rnd = Random::GetTLSInstance(); | 335 | | | 336 | | // Increase height with probability 1 in kBranching | 337 | 7.28M | int height = 1; | 338 | 9.71M | while (height < kMaxHeight_ && rnd->Next() < kScaledInverseBranching_9.70M ) { | 339 | 2.42M | height++; | 340 | 2.42M | } | 341 | 7.28M | return height; | 342 | 7.28M | } |
|
343 | | |
344 | | template<class Key, class Comparator, class NodeType> |
345 | 15.4G | bool SkipListBase<Key, Comparator, NodeType>::KeyIsAfterNode(Key key, Node* n) const { |
346 | | // nullptr n is considered infinite |
347 | 15.4G | return (n != nullptr) && (compare_(n->key, key) < 0)15.3G ; |
348 | 15.4G | } rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::KeyIsAfterNode(char const* const&, rocksdb::SkipListNode<char const*>*) const Line | Count | Source | 345 | 120M | bool SkipListBase<Key, Comparator, NodeType>::KeyIsAfterNode(Key key, Node* n) const { | 346 | | // nullptr n is considered infinite | 347 | 120M | return (n != nullptr) && (compare_(n->key, key) < 0)120M ; | 348 | 120M | } |
rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::KeyIsAfterNode(char const*, rocksdb::SingleWriterInlineSkipListNode*) const Line | Count | Source | 345 | 15.3G | bool SkipListBase<Key, Comparator, NodeType>::KeyIsAfterNode(Key key, Node* n) const { | 346 | | // nullptr n is considered infinite | 347 | 15.3G | return (n != nullptr) && (compare_(n->key, key) < 0)15.2G ; | 348 | 15.3G | } |
rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> >::KeyIsAfterNode(rocksdb::WriteBatchIndexEntry* const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*>*) const Line | Count | Source | 345 | 35.7M | bool SkipListBase<Key, Comparator, NodeType>::KeyIsAfterNode(Key key, Node* n) const { | 346 | | // nullptr n is considered infinite | 347 | 35.7M | return (n != nullptr) && (compare_(n->key, key) < 0)28.4M ; | 348 | 35.7M | } |
|
349 | | |
350 | | template<class Key, class Comparator, class NodeType> |
351 | | typename SkipListBase<Key, Comparator, NodeType>::Node* SkipListBase<Key, Comparator, NodeType>:: |
352 | 186M | FindGreaterOrEqual(Key key) const { |
353 | | // Note: It looks like we could reduce duplication by implementing |
354 | | // this function as FindLessThan(key)->Next(0), but we wouldn't be able |
355 | | // to exit early on equality and the result wouldn't even be correct. |
356 | | // A concurrent insert might occur after FindLessThan(key) but before |
357 | | // we get a chance to call Next(0). |
358 | 186M | Node* x = head_; |
359 | 186M | int level = GetMaxHeight() - 1; |
360 | 186M | Node* last_bigger = nullptr; |
361 | 3.92G | while (true3.92G ) { |
362 | 3.92G | Node* next = x->Next(level); |
363 | | // Make sure the lists are sorted |
364 | 3.92G | DCHECK(x == head_ || next == nullptr || KeyIsAfterNode(next->key, x)); |
365 | | // Make sure we haven't overshot during our search |
366 | 3.92G | DCHECK(x == head_ || KeyIsAfterNode(key, x)); |
367 | 3.92G | int cmp = (next == nullptr || next == last_bigger3.53G ) |
368 | 3.92G | ? 1645M : compare_(next->key, key)3.27G ; |
369 | 3.92G | if (cmp == 0 || (3.91G cmp > 03.91G && level == 01.41G )) { |
370 | 186M | return next; |
371 | 3.73G | } else if (cmp < 0) { |
372 | | // Keep searching in this list |
373 | 2.52G | x = next; |
374 | 2.52G | } else { |
375 | | // Switch to next list, reuse compare_() result |
376 | 1.20G | last_bigger = next; |
377 | 1.20G | level--; |
378 | 1.20G | } |
379 | 3.92G | } |
380 | 186M | } rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::FindGreaterOrEqual(char const* const&) const Line | Count | Source | 352 | 1.30M | FindGreaterOrEqual(Key key) const { | 353 | | // Note: It looks like we could reduce duplication by implementing | 354 | | // this function as FindLessThan(key)->Next(0), but we wouldn't be able | 355 | | // to exit early on equality and the result wouldn't even be correct. | 356 | | // A concurrent insert might occur after FindLessThan(key) but before | 357 | | // we get a chance to call Next(0). | 358 | 1.30M | Node* x = head_; | 359 | 1.30M | int level = GetMaxHeight() - 1; | 360 | 1.30M | Node* last_bigger = nullptr; | 361 | 38.0M | while (true38.0M ) { | 362 | 38.0M | Node* next = x->Next(level); | 363 | | // Make sure the lists are sorted | 364 | 38.0M | DCHECK(x == head_ || next == nullptr || KeyIsAfterNode(next->key, x)); | 365 | | // Make sure we haven't overshot during our search | 366 | 38.0M | DCHECK(x == head_ || KeyIsAfterNode(key, x)); | 367 | 38.0M | int cmp = (next == nullptr || next == last_bigger37.5M ) | 368 | 38.0M | ? 12.37M : compare_(next->key, key)35.7M ; | 369 | 38.1M | if (cmp == 038.0M || (cmp > 0 && level == 07.41M )) { | 370 | 1.30M | return next; | 371 | 36.7M | } else if (cmp < 0) { | 372 | | // Keep searching in this list | 373 | 31.3M | x = next; | 374 | 31.3M | } else { | 375 | | // Switch to next list, reuse compare_() result | 376 | 5.37M | last_bigger = next; | 377 | 5.37M | level--; | 378 | 5.37M | } | 379 | 38.0M | } | 380 | 1.30M | } |
rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::FindGreaterOrEqual(char const*) const Line | Count | Source | 352 | 184M | FindGreaterOrEqual(Key key) const { | 353 | | // Note: It looks like we could reduce duplication by implementing | 354 | | // this function as FindLessThan(key)->Next(0), but we wouldn't be able | 355 | | // to exit early on equality and the result wouldn't even be correct. | 356 | | // A concurrent insert might occur after FindLessThan(key) but before | 357 | | // we get a chance to call Next(0). | 358 | 184M | Node* x = head_; | 359 | 184M | int level = GetMaxHeight() - 1; | 360 | 184M | Node* last_bigger = nullptr; | 361 | 3.87G | while (true3.87G ) { | 362 | 3.87G | Node* next = x->Next(level); | 363 | | // Make sure the lists are sorted | 364 | 3.87G | DCHECK(x == head_ || next == nullptr || KeyIsAfterNode(next->key, x)); | 365 | | // Make sure we haven't overshot during our search | 366 | 3.87G | DCHECK(x == head_ || KeyIsAfterNode(key, x)); | 367 | 3.87G | int cmp = (next == nullptr || next == last_bigger3.48G ) | 368 | 3.87G | ? 1642M : compare_(next->key, key)3.23G ; | 369 | 3.87G | if (cmp == 0 || (3.87G cmp > 03.87G && level == 01.40G )) { | 370 | 184M | return next; | 371 | 3.69G | } else if (cmp < 0) { | 372 | | // Keep searching in this list | 373 | 2.48G | x = next; | 374 | 2.48G | } else { | 375 | | // Switch to next list, reuse compare_() result | 376 | 1.20G | last_bigger = next; | 377 | 1.20G | level--; | 378 | 1.20G | } | 379 | 3.87G | } | 380 | 184M | } |
rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> >::FindGreaterOrEqual(rocksdb::WriteBatchIndexEntry* const&) const Line | Count | Source | 352 | 756k | FindGreaterOrEqual(Key key) const { | 353 | | // Note: It looks like we could reduce duplication by implementing | 354 | | // this function as FindLessThan(key)->Next(0), but we wouldn't be able | 355 | | // to exit early on equality and the result wouldn't even be correct. | 356 | | // A concurrent insert might occur after FindLessThan(key) but before | 357 | | // we get a chance to call Next(0). | 358 | 756k | Node* x = head_; | 359 | 756k | int level = GetMaxHeight() - 1; | 360 | 756k | Node* last_bigger = nullptr; | 361 | 7.55M | while (true) { | 362 | 7.55M | Node* next = x->Next(level); | 363 | | // Make sure the lists are sorted | 364 | 7.55M | DCHECK(x == head_ || next == nullptr || KeyIsAfterNode(next->key, x)); | 365 | | // Make sure we haven't overshot during our search | 366 | 7.55M | DCHECK(x == head_ || KeyIsAfterNode(key, x)); | 367 | 7.55M | int cmp = (next == nullptr || next == last_bigger7.53M ) | 368 | 7.55M | ? 1195k : compare_(next->key, key)7.36M ; | 369 | 7.55M | if (cmp == 0 || (cmp > 0 && level == 01.51M )) { | 370 | 756k | return next; | 371 | 6.80M | } else if (cmp < 0) { | 372 | | // Keep searching in this list | 373 | 6.04M | x = next; | 374 | 6.04M | } else { | 375 | | // Switch to next list, reuse compare_() result | 376 | 758k | last_bigger = next; | 377 | 758k | level--; | 378 | 758k | } | 379 | 7.55M | } | 380 | 756k | } |
|
381 | | |
382 | | template<class Key, class Comparator, class NodeType> |
383 | | typename SkipListBase<Key, Comparator, NodeType>::Node* |
384 | 184M | SkipListBase<Key, Comparator, NodeType>::FindLessThan(Key key, Node** prev) const { |
385 | 184M | Node* x = head_; |
386 | 184M | int level = GetMaxHeight() - 1; |
387 | | // KeyIsAfter(key, last_not_after) is definitely false |
388 | 184M | Node* last_not_after = nullptr; |
389 | 3.79G | while (true3.79G ) { |
390 | 3.79G | Node* next = x->Next(level); |
391 | 3.79G | DCHECK(x == head_ || next == nullptr || KeyIsAfterNode(next->key, x)); |
392 | 3.79G | DCHECK(x == head_ || KeyIsAfterNode(key, x)); |
393 | 3.79G | if (next != last_not_after && KeyIsAfterNode(key, next)3.01G ) { |
394 | | // Keep searching in this list |
395 | 2.24G | x = next; |
396 | 2.24G | } else { |
397 | 1.57G | if (prev != nullptr1.55G ) { |
398 | 1.57G | prev[level] = x; |
399 | 1.57G | } |
400 | 1.55G | if (level == 0) { |
401 | 184M | return x; |
402 | 1.36G | } else { |
403 | | // Switch to next list, reuse KeyIUsAfterNode() result |
404 | 1.36G | last_not_after = next; |
405 | 1.36G | level--; |
406 | 1.36G | } |
407 | 1.55G | } |
408 | 3.79G | } |
409 | 184M | } rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::FindLessThan(char const* const&, rocksdb::SkipListNode<char const*>**) const Line | Count | Source | 384 | 558k | SkipListBase<Key, Comparator, NodeType>::FindLessThan(Key key, Node** prev) const { | 385 | 558k | Node* x = head_; | 386 | 558k | int level = GetMaxHeight() - 1; | 387 | | // KeyIsAfter(key, last_not_after) is definitely false | 388 | 558k | Node* last_not_after = nullptr; | 389 | 19.5M | while (true) { | 390 | 19.5M | Node* next = x->Next(level); | 391 | 19.5M | DCHECK(x == head_ || next == nullptr || KeyIsAfterNode(next->key, x)); | 392 | 19.5M | DCHECK(x == head_ || KeyIsAfterNode(key, x)); | 393 | 19.5M | if (next != last_not_after && KeyIsAfterNode(key, next)18.4M ) { | 394 | | // Keep searching in this list | 395 | 16.0M | x = next; | 396 | 16.0M | } else { | 397 | 3.52M | if (prev != nullptr) { | 398 | 3.52M | prev[level] = x; | 399 | 3.52M | } | 400 | 3.52M | if (level == 0) { | 401 | 558k | return x; | 402 | 2.96M | } else { | 403 | | // Switch to next list, reuse KeyIUsAfterNode() result | 404 | 2.96M | last_not_after = next; | 405 | 2.96M | level--; | 406 | 2.96M | } | 407 | 3.52M | } | 408 | 19.5M | } | 409 | 558k | } |
rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::FindLessThan(char const*, rocksdb::SingleWriterInlineSkipListNode**) const Line | Count | Source | 384 | 183M | SkipListBase<Key, Comparator, NodeType>::FindLessThan(Key key, Node** prev) const { | 385 | 183M | Node* x = head_; | 386 | 183M | int level = GetMaxHeight() - 1; | 387 | | // KeyIsAfter(key, last_not_after) is definitely false | 388 | 183M | Node* last_not_after = nullptr; | 389 | 3.77G | while (true3.77G ) { | 390 | 3.77G | Node* next = x->Next(level); | 391 | 3.77G | DCHECK(x == head_ || next == nullptr || KeyIsAfterNode(next->key, x)); | 392 | 3.77G | DCHECK(x == head_ || KeyIsAfterNode(key, x)); | 393 | 3.77G | if (next != last_not_after && KeyIsAfterNode(key, next)2.99G ) { | 394 | | // Keep searching in this list | 395 | 2.22G | x = next; | 396 | 2.22G | } else { | 397 | 1.57G | if (prev != nullptr1.54G ) { | 398 | 1.57G | prev[level] = x; | 399 | 1.57G | } | 400 | 1.54G | if (level == 0) { | 401 | 183M | return x; | 402 | 1.36G | } else { | 403 | | // Switch to next list, reuse KeyIUsAfterNode() result | 404 | 1.36G | last_not_after = next; | 405 | 1.36G | level--; | 406 | 1.36G | } | 407 | 1.54G | } | 408 | 3.77G | } | 409 | 183M | } |
rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> >::FindLessThan(rocksdb::WriteBatchIndexEntry* const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*>**) const Line | Count | Source | 384 | 304k | SkipListBase<Key, Comparator, NodeType>::FindLessThan(Key key, Node** prev) const { | 385 | 304k | Node* x = head_; | 386 | 304k | int level = GetMaxHeight() - 1; | 387 | | // KeyIsAfter(key, last_not_after) is definitely false | 388 | 304k | Node* last_not_after = nullptr; | 389 | 3.03M | while (true) { | 390 | 3.03M | Node* next = x->Next(level); | 391 | 3.03M | DCHECK(x == head_ || next == nullptr || KeyIsAfterNode(next->key, x)); | 392 | 3.03M | DCHECK(x == head_ || KeyIsAfterNode(key, x)); | 393 | 3.03M | if (next != last_not_after && KeyIsAfterNode(key, next)2.96M ) { | 394 | | // Keep searching in this list | 395 | 2.42M | x = next; | 396 | 2.42M | } else { | 397 | 610k | if (prev != nullptr) { | 398 | 389 | prev[level] = x; | 399 | 389 | } | 400 | 610k | if (level == 0) { | 401 | 304k | return x; | 402 | 306k | } else { | 403 | | // Switch to next list, reuse KeyIUsAfterNode() result | 404 | 306k | last_not_after = next; | 405 | 306k | level--; | 406 | 306k | } | 407 | 610k | } | 408 | 3.03M | } | 409 | 304k | } |
|
410 | | |
411 | | template<class Key, class Comparator, class NodeType> |
412 | | typename SkipListBase<Key, Comparator, NodeType>::Node* |
413 | 1.26M | SkipListBase<Key, Comparator, NodeType>::FindLast() const { |
414 | 1.26M | Node* x = head_; |
415 | 1.26M | int level = GetMaxHeight() - 1; |
416 | 15.7M | while (true) { |
417 | 15.7M | Node* next = x->Next(level); |
418 | 15.7M | if (next == nullptr) { |
419 | 4.87M | if (level == 0) { |
420 | 1.25M | return x; |
421 | 3.61M | } else { |
422 | | // Switch to next list |
423 | 3.61M | level--; |
424 | 3.61M | } |
425 | 10.8M | } else { |
426 | 10.8M | x = next; |
427 | 10.8M | } |
428 | 15.7M | } |
429 | 1.26M | } Unexecuted instantiation: rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::FindLast() const rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::FindLast() const Line | Count | Source | 413 | 1.25M | SkipListBase<Key, Comparator, NodeType>::FindLast() const { | 414 | 1.25M | Node* x = head_; | 415 | 1.25M | int level = GetMaxHeight() - 1; | 416 | 15.7M | while (true) { | 417 | 15.7M | Node* next = x->Next(level); | 418 | 15.7M | if (next == nullptr) { | 419 | 4.87M | if (level == 0) { | 420 | 1.25M | return x; | 421 | 3.61M | } else { | 422 | | // Switch to next list | 423 | 3.61M | level--; | 424 | 3.61M | } | 425 | 10.8M | } else { | 426 | 10.8M | x = next; | 427 | 10.8M | } | 428 | 15.7M | } | 429 | 1.25M | } |
rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> >::FindLast() const Line | Count | Source | 413 | 1.41k | SkipListBase<Key, Comparator, NodeType>::FindLast() const { | 414 | 1.41k | Node* x = head_; | 415 | 1.41k | int level = GetMaxHeight() - 1; | 416 | 8.83k | while (true) { | 417 | 8.83k | Node* next = x->Next(level); | 418 | 8.83k | if (next == nullptr) { | 419 | 3.16k | if (level == 0) { | 420 | 1.41k | return x; | 421 | 1.75k | } else { | 422 | | // Switch to next list | 423 | 1.75k | level--; | 424 | 1.75k | } | 425 | 5.67k | } else { | 426 | 5.67k | x = next; | 427 | 5.67k | } | 428 | 8.83k | } | 429 | 1.41k | } |
|
430 | | |
431 | | template<class Key, class Comparator, class NodeType> |
432 | 0 | uint64_t SkipListBase<Key, Comparator, NodeType>::EstimateCount(Key key) const { |
433 | 0 | uint64_t count = 0; |
434 | |
|
435 | 0 | Node* x = head_; |
436 | 0 | int level = GetMaxHeight() - 1; |
437 | 0 | while (true) { |
438 | 0 | DCHECK(x == head_ || compare_(x->key, key) < 0); |
439 | 0 | Node* next = x->Next(level); |
440 | 0 | if (next == nullptr || compare_(next->key, key) >= 0) { |
441 | 0 | if (level == 0) { |
442 | 0 | return count; |
443 | 0 | } else { |
444 | | // Switch to next list |
445 | 0 | count *= kBranching_; |
446 | 0 | level--; |
447 | 0 | } |
448 | 0 | } else { |
449 | 0 | x = next; |
450 | 0 | count++; |
451 | 0 | } |
452 | 0 | } |
453 | 0 | } |
454 | | |
455 | | template<class Key, class Comparator, class NodeType> |
456 | 371M | void SkipListBase<Key, Comparator, NodeType>::PrepareInsert(Key key) { |
457 | | // fast path for sequential insertion |
458 | 371M | if (prev_valid_ && !KeyIsAfterNode(key, prev_[0]->NoBarrier_Next(0))370M && |
459 | 371M | (337M prev_[0] == head_337M || KeyIsAfterNode(key, prev_[0])336M )) { |
460 | 18.4E | DCHECK(prev_[0] != head_ || (prev_height_ == 1 && GetMaxHeight() == 1)) |
461 | 18.4E | << "prev_height_: " << prev_height_ << ", GetMaxHeight(): " << GetMaxHeight(); |
462 | | |
463 | | // Outside of this method prev_[1..max_height_] is the predecessor |
464 | | // of prev_[0], and prev_height_ refers to prev_[0]. Inside Insert |
465 | | // prev_[0..max_height - 1] is the predecessor of key. Switch from |
466 | | // the external state to the internal |
467 | 442M | for (int i = 1; i < prev_height_; i++110M ) { |
468 | 110M | prev_[i] = prev_[0]; |
469 | 110M | } |
470 | 332M | } else { |
471 | | // TODO(opt): we could use a NoBarrier predecessor search as an |
472 | | // optimization for architectures where memory_order_acquire needs |
473 | | // a synchronization instruction. Doesn't matter on x86 |
474 | 38.8M | FindLessThan(key, prev_); |
475 | 38.8M | prev_valid_ = true; |
476 | 38.8M | } |
477 | | |
478 | | // Our data structure does not allow duplicate insertion |
479 | 371M | DCHECK(prev_[0]->Next(0) == nullptr || !Equal(key, prev_[0]->Next(0)->key)); |
480 | 371M | } rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::PrepareInsert(char const* const&) Line | Count | Source | 456 | 1.07M | void SkipListBase<Key, Comparator, NodeType>::PrepareInsert(Key key) { | 457 | | // fast path for sequential insertion | 458 | 1.07M | if (prev_valid_1.07M && !KeyIsAfterNode(key, prev_[0]->NoBarrier_Next(0)) && | 459 | 1.07M | (795k prev_[0] == head_795k || KeyIsAfterNode(key, prev_[0])731k )) { | 460 | 515k | DCHECK(prev_[0] != head_ || (prev_height_ == 1 && GetMaxHeight() == 1)) | 461 | 1 | << "prev_height_: " << prev_height_ << ", GetMaxHeight(): " << GetMaxHeight(); | 462 | | | 463 | | // Outside of this method prev_[1..max_height_] is the predecessor | 464 | | // of prev_[0], and prev_height_ refers to prev_[0]. Inside Insert | 465 | | // prev_[0..max_height - 1] is the predecessor of key. Switch from | 466 | | // the external state to the internal | 467 | 665k | for (int i = 1; i < prev_height_; i++150k ) { | 468 | 150k | prev_[i] = prev_[0]; | 469 | 150k | } | 470 | 558k | } else { | 471 | | // TODO(opt): we could use a NoBarrier predecessor search as an | 472 | | // optimization for architectures where memory_order_acquire needs | 473 | | // a synchronization instruction. Doesn't matter on x86 | 474 | 558k | FindLessThan(key, prev_); | 475 | 558k | prev_valid_ = true; | 476 | 558k | } | 477 | | | 478 | | // Our data structure does not allow duplicate insertion | 479 | 1.07M | DCHECK(prev_[0]->Next(0) == nullptr || !Equal(key, prev_[0]->Next(0)->key)); | 480 | 1.07M | } |
rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::PrepareInsert(char const*) Line | Count | Source | 456 | 362M | void SkipListBase<Key, Comparator, NodeType>::PrepareInsert(Key key) { | 457 | | // fast path for sequential insertion | 458 | 362M | if (prev_valid_ && !KeyIsAfterNode(key, prev_[0]->NoBarrier_Next(0))361M && | 459 | 362M | (329M prev_[0] == head_329M || KeyIsAfterNode(key, prev_[0])329M )) { | 460 | 18.4E | DCHECK(prev_[0] != head_ || (prev_height_ == 1 && GetMaxHeight() == 1)) | 461 | 18.4E | << "prev_height_: " << prev_height_ << ", GetMaxHeight(): " << GetMaxHeight(); | 462 | | | 463 | | // Outside of this method prev_[1..max_height_] is the predecessor | 464 | | // of prev_[0], and prev_height_ refers to prev_[0]. Inside Insert | 465 | | // prev_[0..max_height - 1] is the predecessor of key. Switch from | 466 | | // the external state to the internal | 467 | 432M | for (int i = 1; i < prev_height_; i++108M ) { | 468 | 108M | prev_[i] = prev_[0]; | 469 | 108M | } | 470 | 324M | } else { | 471 | | // TODO(opt): we could use a NoBarrier predecessor search as an | 472 | | // optimization for architectures where memory_order_acquire needs | 473 | | // a synchronization instruction. Doesn't matter on x86 | 474 | 38.2M | FindLessThan(key, prev_); | 475 | 38.2M | prev_valid_ = true; | 476 | 38.2M | } | 477 | | | 478 | | // Our data structure does not allow duplicate insertion | 479 | 362M | DCHECK(prev_[0]->Next(0) == nullptr || !Equal(key, prev_[0]->Next(0)->key)); | 480 | 362M | } |
rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> >::PrepareInsert(rocksdb::WriteBatchIndexEntry* const&) Line | Count | Source | 456 | 7.28M | void SkipListBase<Key, Comparator, NodeType>::PrepareInsert(Key key) { | 457 | | // fast path for sequential insertion | 458 | 7.28M | if (prev_valid_7.28M && !KeyIsAfterNode(key, prev_[0]->NoBarrier_Next(0)) && | 459 | 7.28M | (7.28M prev_[0] == head_7.28M || KeyIsAfterNode(key, prev_[0])6.55M )) { | 460 | 7.28M | DCHECK(prev_[0] != head_ || (prev_height_ == 1 && GetMaxHeight() == 1)) | 461 | 296 | << "prev_height_: " << prev_height_ << ", GetMaxHeight(): " << GetMaxHeight(); | 462 | | | 463 | | // Outside of this method prev_[1..max_height_] is the predecessor | 464 | | // of prev_[0], and prev_height_ refers to prev_[0]. Inside Insert | 465 | | // prev_[0..max_height - 1] is the predecessor of key. Switch from | 466 | | // the external state to the internal | 467 | 9.47M | for (int i = 1; i < prev_height_; i++2.18M ) { | 468 | 2.18M | prev_[i] = prev_[0]; | 469 | 2.18M | } | 470 | 7.28M | } else { | 471 | | // TODO(opt): we could use a NoBarrier predecessor search as an | 472 | | // optimization for architectures where memory_order_acquire needs | 473 | | // a synchronization instruction. Doesn't matter on x86 | 474 | 2.47k | FindLessThan(key, prev_); | 475 | 2.47k | prev_valid_ = true; | 476 | 2.47k | } | 477 | | | 478 | | // Our data structure does not allow duplicate insertion | 479 | 7.28M | DCHECK(prev_[0]->Next(0) == nullptr || !Equal(key, prev_[0]->Next(0)->key)); | 480 | 7.28M | } |
|
481 | | |
482 | | template<class Key, class Comparator, class NodeType> |
483 | | SkipListBase<Key, Comparator, NodeType>::SkipListBase( |
484 | | const Comparator cmp, Allocator* allocator, int32_t max_height, int32_t branching_factor) |
485 | | : allocator_(allocator), |
486 | | kMaxHeight_(max_height), |
487 | | kBranching_(branching_factor), |
488 | | kScaledInverseBranching_((Random::kMaxNext + 1) / kBranching_), |
489 | | compare_(cmp), |
490 | | head_(Node::CreateHead(allocator, max_height)), |
491 | | max_height_(1), |
492 | 1.23M | prev_height_(1) { |
493 | 1.23M | DCHECK(max_height > 0 && kMaxHeight_ == static_cast<uint32_t>(max_height)); |
494 | 1.23M | DCHECK(branching_factor > 0 && |
495 | 1.23M | kBranching_ == static_cast<uint32_t>(branching_factor)); |
496 | 1.23M | DCHECK_GT(kScaledInverseBranching_, 0); |
497 | | // Allocate the prev_ Node* array, directly from the passed-in allocator. |
498 | | // prev_ does not need to be freed, as its life cycle is tied up with |
499 | | // the allocator as a whole. |
500 | 1.23M | prev_ = reinterpret_cast<Node**>(allocator_->AllocateAligned(sizeof(Node*) * kMaxHeight_)); |
501 | 15.4M | for (int i = 0; i < kMaxHeight_; i++14.2M ) { |
502 | 14.2M | head_->SetNext(i, nullptr); |
503 | 14.2M | prev_[i] = head_; |
504 | 14.2M | } |
505 | 1.23M | } rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::SkipListBase(rocksdb::MemTableRep::KeyComparator const&, rocksdb::Allocator*, int, int) Line | Count | Source | 492 | 64.7k | prev_height_(1) { | 493 | 64.7k | DCHECK(max_height > 0 && kMaxHeight_ == static_cast<uint32_t>(max_height)); | 494 | 64.7k | DCHECK(branching_factor > 0 && | 495 | 64.7k | kBranching_ == static_cast<uint32_t>(branching_factor)); | 496 | 64.7k | DCHECK_GT(kScaledInverseBranching_, 0); | 497 | | // Allocate the prev_ Node* array, directly from the passed-in allocator. | 498 | | // prev_ does not need to be freed, as its life cycle is tied up with | 499 | | // the allocator as a whole. | 500 | 64.7k | prev_ = reinterpret_cast<Node**>(allocator_->AllocateAligned(sizeof(Node*) * kMaxHeight_)); | 501 | 334k | for (int i = 0; i < kMaxHeight_; i++269k ) { | 502 | 269k | head_->SetNext(i, nullptr); | 503 | 269k | prev_[i] = head_; | 504 | 269k | } | 505 | 64.7k | } |
rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::SkipListBase(rocksdb::MemTableRep::KeyComparator const&, rocksdb::Allocator*, int, int) Line | Count | Source | 492 | 430k | prev_height_(1) { | 493 | 430k | DCHECK(max_height > 0 && kMaxHeight_ == static_cast<uint32_t>(max_height)); | 494 | 430k | DCHECK(branching_factor > 0 && | 495 | 430k | kBranching_ == static_cast<uint32_t>(branching_factor)); | 496 | 430k | DCHECK_GT(kScaledInverseBranching_, 0); | 497 | | // Allocate the prev_ Node* array, directly from the passed-in allocator. | 498 | | // prev_ does not need to be freed, as its life cycle is tied up with | 499 | | // the allocator as a whole. | 500 | 430k | prev_ = reinterpret_cast<Node**>(allocator_->AllocateAligned(sizeof(Node*) * kMaxHeight_)); | 501 | 5.58M | for (int i = 0; i < kMaxHeight_; i++5.15M ) { | 502 | 5.15M | head_->SetNext(i, nullptr); | 503 | 5.15M | prev_[i] = head_; | 504 | 5.15M | } | 505 | 430k | } |
rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> >::SkipListBase(rocksdb::WriteBatchEntryComparator const&, rocksdb::Allocator*, int, int) Line | Count | Source | 492 | 736k | prev_height_(1) { | 493 | 736k | DCHECK(max_height > 0 && kMaxHeight_ == static_cast<uint32_t>(max_height)); | 494 | 736k | DCHECK(branching_factor > 0 && | 495 | 736k | kBranching_ == static_cast<uint32_t>(branching_factor)); | 496 | 736k | DCHECK_GT(kScaledInverseBranching_, 0); | 497 | | // Allocate the prev_ Node* array, directly from the passed-in allocator. | 498 | | // prev_ does not need to be freed, as its life cycle is tied up with | 499 | | // the allocator as a whole. | 500 | 736k | prev_ = reinterpret_cast<Node**>(allocator_->AllocateAligned(sizeof(Node*) * kMaxHeight_)); | 501 | 9.53M | for (int i = 0; i < kMaxHeight_; i++8.80M ) { | 502 | 8.80M | head_->SetNext(i, nullptr); | 503 | 8.80M | prev_[i] = head_; | 504 | 8.80M | } | 505 | 736k | } |
|
506 | | |
507 | | template<class Key, class Comparator, class NodeType> |
508 | 371M | void SkipListBase<Key, Comparator, NodeType>::CompleteInsert(NodeType* node, int height) { |
509 | 371M | DCHECK_GT(height, 0); |
510 | 371M | DCHECK_LE(height, kMaxHeight_); |
511 | | |
512 | 371M | if (height > GetMaxHeight()) { |
513 | 2.48M | for (int i = GetMaxHeight(); i < height; i++1.42M ) { |
514 | 1.42M | prev_[i] = head_; |
515 | 1.42M | } |
516 | | |
517 | | // It is ok to mutate max_height_ without any synchronization |
518 | | // with concurrent readers. A concurrent reader that observes |
519 | | // the new value of max_height_ will see either the old value of |
520 | | // new level pointers from head_ (nullptr), or a new value set in |
521 | | // the loop below. In the former case the reader will |
522 | | // immediately drop to the next level since nullptr sorts after all |
523 | | // keys. In the latter case the reader will use the new node. |
524 | 1.06M | max_height_.store(height, std::memory_order_relaxed); |
525 | 1.06M | } |
526 | | |
527 | 866M | for (int i = 0; i < height; i++494M ) { |
528 | | // NoBarrier_SetNext() suffices since we will add a barrier when |
529 | | // we publish a pointer to "x" in prev[i]. |
530 | 494M | node->NoBarrier_SetNext(i, prev_[i]->NoBarrier_Next(i)); |
531 | 494M | prev_[i]->SetNext(i, node); |
532 | 494M | } |
533 | 371M | prev_[0] = node; |
534 | 371M | prev_height_ = height; |
535 | 371M | } rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::CompleteInsert(rocksdb::SkipListNode<char const*>*, int) Line | Count | Source | 508 | 1.07M | void SkipListBase<Key, Comparator, NodeType>::CompleteInsert(NodeType* node, int height) { | 509 | 1.07M | DCHECK_GT(height, 0); | 510 | 1.07M | DCHECK_LE(height, kMaxHeight_); | 511 | | | 512 | 1.07M | if (height > GetMaxHeight()) { | 513 | 58.0k | for (int i = GetMaxHeight(); i < height; i++32.9k ) { | 514 | 32.9k | prev_[i] = head_; | 515 | 32.9k | } | 516 | | | 517 | | // It is ok to mutate max_height_ without any synchronization | 518 | | // with concurrent readers. A concurrent reader that observes | 519 | | // the new value of max_height_ will see either the old value of | 520 | | // new level pointers from head_ (nullptr), or a new value set in | 521 | | // the loop below. In the former case the reader will | 522 | | // immediately drop to the next level since nullptr sorts after all | 523 | | // keys. In the latter case the reader will use the new node. | 524 | 25.1k | max_height_.store(height, std::memory_order_relaxed); | 525 | 25.1k | } | 526 | | | 527 | 2.50M | for (int i = 0; i < height; i++1.42M ) { | 528 | | // NoBarrier_SetNext() suffices since we will add a barrier when | 529 | | // we publish a pointer to "x" in prev[i]. | 530 | 1.42M | node->NoBarrier_SetNext(i, prev_[i]->NoBarrier_Next(i)); | 531 | 1.42M | prev_[i]->SetNext(i, node); | 532 | 1.42M | } | 533 | 1.07M | prev_[0] = node; | 534 | 1.07M | prev_height_ = height; | 535 | 1.07M | } |
rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::CompleteInsert(rocksdb::SingleWriterInlineSkipListNode*, int) Line | Count | Source | 508 | 362M | void SkipListBase<Key, Comparator, NodeType>::CompleteInsert(NodeType* node, int height) { | 509 | 362M | DCHECK_GT(height, 0); | 510 | 362M | DCHECK_LE(height, kMaxHeight_); | 511 | | | 512 | 362M | if (height > GetMaxHeight()) { | 513 | 371k | for (int i = GetMaxHeight(); i < height; i++212k ) { | 514 | 212k | prev_[i] = head_; | 515 | 212k | } | 516 | | | 517 | | // It is ok to mutate max_height_ without any synchronization | 518 | | // with concurrent readers. A concurrent reader that observes | 519 | | // the new value of max_height_ will see either the old value of | 520 | | // new level pointers from head_ (nullptr), or a new value set in | 521 | | // the loop below. In the former case the reader will | 522 | | // immediately drop to the next level since nullptr sorts after all | 523 | | // keys. In the latter case the reader will use the new node. | 524 | 159k | max_height_.store(height, std::memory_order_relaxed); | 525 | 159k | } | 526 | | | 527 | 846M | for (int i = 0; i < height; i++483M ) { | 528 | | // NoBarrier_SetNext() suffices since we will add a barrier when | 529 | | // we publish a pointer to "x" in prev[i]. | 530 | 483M | node->NoBarrier_SetNext(i, prev_[i]->NoBarrier_Next(i)); | 531 | 483M | prev_[i]->SetNext(i, node); | 532 | 483M | } | 533 | 362M | prev_[0] = node; | 534 | 362M | prev_height_ = height; | 535 | 362M | } |
rocksdb::SkipListBase<rocksdb::WriteBatchIndexEntry* const&, rocksdb::WriteBatchEntryComparator const&, rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*> >::CompleteInsert(rocksdb::SkipListNode<rocksdb::WriteBatchIndexEntry*>*, int) Line | Count | Source | 508 | 7.27M | void SkipListBase<Key, Comparator, NodeType>::CompleteInsert(NodeType* node, int height) { | 509 | 7.27M | DCHECK_GT(height, 0); | 510 | 7.27M | DCHECK_LE(height, kMaxHeight_); | 511 | | | 512 | 7.27M | if (height > GetMaxHeight()) { | 513 | 2.05M | for (int i = GetMaxHeight(); i < height; i++1.17M ) { | 514 | 1.17M | prev_[i] = head_; | 515 | 1.17M | } | 516 | | | 517 | | // It is ok to mutate max_height_ without any synchronization | 518 | | // with concurrent readers. A concurrent reader that observes | 519 | | // the new value of max_height_ will see either the old value of | 520 | | // new level pointers from head_ (nullptr), or a new value set in | 521 | | // the loop below. In the former case the reader will | 522 | | // immediately drop to the next level since nullptr sorts after all | 523 | | // keys. In the latter case the reader will use the new node. | 524 | 882k | max_height_.store(height, std::memory_order_relaxed); | 525 | 882k | } | 526 | | | 527 | 16.9M | for (int i = 0; i < height; i++9.69M ) { | 528 | | // NoBarrier_SetNext() suffices since we will add a barrier when | 529 | | // we publish a pointer to "x" in prev[i]. | 530 | 9.69M | node->NoBarrier_SetNext(i, prev_[i]->NoBarrier_Next(i)); | 531 | 9.69M | prev_[i]->SetNext(i, node); | 532 | 9.69M | } | 533 | 7.27M | prev_[0] = node; | 534 | 7.27M | prev_height_ = height; | 535 | 7.27M | } |
|
536 | | |
537 | | template<class Key, class Comparator, class NodeType> |
538 | 602k | bool SkipListBase<Key, Comparator, NodeType>::Contains(Key key) const { |
539 | 602k | Node* x = FindGreaterOrEqual(key); |
540 | 602k | return x != nullptr && Equal(key, x->key)562k ; |
541 | 602k | } rocksdb::SkipListBase<char const* const&, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SkipListNode<char const*> >::Contains(char const* const&) const Line | Count | Source | 538 | 602k | bool SkipListBase<Key, Comparator, NodeType>::Contains(Key key) const { | 539 | 602k | Node* x = FindGreaterOrEqual(key); | 540 | 602k | return x != nullptr && Equal(key, x->key)562k ; | 541 | 602k | } |
Unexecuted instantiation: rocksdb::SkipListBase<char const*, rocksdb::MemTableRep::KeyComparator const&, rocksdb::SingleWriterInlineSkipListNode>::Contains(char const*) const |
542 | | |
543 | | template<class Key, class Comparator, class NodeType> |
544 | 144M | bool SkipListBase<Key, Comparator, NodeType>::Erase(Key key, Comparator cmp) { |
545 | 144M | auto prev = static_cast<Node**>(alloca(sizeof(Node*) * kMaxHeight_)); |
546 | 144M | auto node = FindLessThan(key, prev); |
547 | 144M | node = node->Next(0); |
548 | 144M | if (node == nullptr || cmp(key, node->key) != 0141M ) { |
549 | 8.03M | return false; |
550 | 8.03M | } |
551 | | |
552 | 1.33G | for (int level = max_height_; 136M --level >= 0;) { |
553 | 1.20G | if (prev[level]->NoBarrier_Next(level) == node) { |
554 | 181M | prev[level]->SetNext(level, node->Next(level)); |
555 | 181M | } |
556 | 1.20G | } |
557 | | |
558 | 136M | prev_valid_ = false; |
559 | | |
560 | 136M | return true; |
561 | 144M | } |
562 | | |
563 | | struct SingleWriterInlineSkipListNode { |
564 | | std::atomic<SingleWriterInlineSkipListNode*> next_[1]; |
565 | | char key[0]; |
566 | | |
567 | 363M | explicit SingleWriterInlineSkipListNode(int height) { |
568 | 363M | memcpy(static_cast<void*>(&next_[0]), &height, sizeof(int)); |
569 | 363M | } |
570 | | |
571 | | // Accessors/mutators for links. Wrapped in methods so we can add |
572 | | // the appropriate barriers as necessary, and perform the necessary |
573 | | // addressing trickery for storing links below the Node in memory. |
574 | 8.90G | ATTRIBUTE_NO_SANITIZE_UNDEFINED SingleWriterInlineSkipListNode* Next(int n) { |
575 | 8.90G | DCHECK_GE(n, 0); |
576 | | // Use an 'acquire load' so that we observe a fully initialized |
577 | | // version of the returned Node. |
578 | 8.90G | return (next_[-n].load(std::memory_order_acquire)); |
579 | 8.90G | } |
580 | | |
581 | 670M | ATTRIBUTE_NO_SANITIZE_UNDEFINED void SetNext(int n, SingleWriterInlineSkipListNode* x) { |
582 | 670M | DCHECK_GE(n, 0); |
583 | | // Use a 'release store' so that anybody who reads through this |
584 | | // pointer observes a fully initialized version of the inserted node. |
585 | 670M | next_[-n].store(x, std::memory_order_release); |
586 | 670M | } |
587 | | |
588 | | // No-barrier variants that can be safely used in a few locations. |
589 | 2.04G | ATTRIBUTE_NO_SANITIZE_UNDEFINED SingleWriterInlineSkipListNode* NoBarrier_Next(int n) { |
590 | 2.04G | DCHECK_GE(n, 0); |
591 | 2.04G | return next_[-n].load(std::memory_order_relaxed); |
592 | 2.04G | } |
593 | | |
594 | 483M | ATTRIBUTE_NO_SANITIZE_UNDEFINED void NoBarrier_SetNext(int n, SingleWriterInlineSkipListNode* x) { |
595 | 483M | DCHECK_GE(n, 0); |
596 | 483M | next_[-n].store(x, std::memory_order_relaxed); |
597 | 483M | } |
598 | | |
599 | 362M | int UnstashHeight() const { |
600 | 362M | int rv; |
601 | 362M | memcpy(&rv, &next_[0], sizeof(int)); |
602 | 362M | return rv; |
603 | 362M | } |
604 | | |
605 | 363M | static SingleWriterInlineSkipListNode* Create(Allocator* allocator, size_t key_size, int height) { |
606 | 363M | auto prefix = sizeof(std::atomic<SingleWriterInlineSkipListNode*>) * (height - 1); |
607 | | |
608 | | // prefix is space for the height - 1 pointers that we store before |
609 | | // the Node instance (next_[-(height - 1) .. -1]). Node starts at |
610 | | // raw + prefix, and holds the bottom-mode (level 0) skip list pointer |
611 | | // next_[0]. key_size is the bytes for the key. |
612 | 363M | char* mem = allocator->AllocateAligned( |
613 | 363M | prefix + offsetof(SingleWriterInlineSkipListNode, key) + key_size); |
614 | 363M | return new (mem + prefix) SingleWriterInlineSkipListNode(height); |
615 | 363M | } |
616 | | |
617 | 430k | static SingleWriterInlineSkipListNode* CreateHead(Allocator* allocator, int height) { |
618 | 430k | return Create(allocator, 0 /* key_size */, height); |
619 | 430k | } |
620 | | }; |
621 | | |
622 | | // Skip list designed for using variable size byte arrays as a key. |
623 | | // Node has variable size and key is copied directly to the node. |
624 | | // Please note thread safety at top of this file. |
625 | | template <class Comparator> |
626 | | class SingleWriterInlineSkipList : |
627 | | public SkipListBase<const char*, Comparator, SingleWriterInlineSkipListNode> { |
628 | | private: |
629 | | typedef SkipListBase<const char*, Comparator, SingleWriterInlineSkipListNode> Base; |
630 | | |
631 | | public: |
632 | | template<class... Args> |
633 | | explicit SingleWriterInlineSkipList(Args&&... args) |
634 | 430k | : Base(std::forward<Args>(args)...) { |
635 | 430k | } |
636 | | |
637 | 362M | char* AllocateKey(size_t key_size) { |
638 | 362M | return SingleWriterInlineSkipListNode::Create( |
639 | 362M | Base::allocator_, key_size, Base::RandomHeight())->key; |
640 | 362M | } |
641 | | |
642 | 362M | void Insert(const char* key) { |
643 | 362M | Base::PrepareInsert(key); |
644 | 362M | auto node = reinterpret_cast<SingleWriterInlineSkipListNode*>( |
645 | 362M | const_cast<char*>(key) - offsetof(SingleWriterInlineSkipListNode, key)); |
646 | 362M | Base::CompleteInsert(node, node->UnstashHeight()); |
647 | 362M | } |
648 | | |
649 | 0 | void InsertConcurrently(const char* key) { |
650 | 0 | LOG(FATAL) << "Concurrent insert is not supported"; |
651 | 0 | } |
652 | | }; |
653 | | |
654 | | } // namespace rocksdb |
655 | | |
656 | | #endif // YB_ROCKSDB_DB_SKIPLIST_H |