/Users/deen/code/yugabyte-db/src/yb/rocksdb/memtable/vectorrep.cc
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// The following only applies to changes made to this file as part of YugaByte development.
//
// Portions Copyright (c) YugaByte, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations
// under the License.
//
#ifndef ROCKSDB_LITE
#include <unordered_set>
#include <set>
#include <memory>
#include <algorithm>
#include <type_traits>

#include "yb/rocksdb/memtablerep.h"

#include "yb/rocksdb/util/arena.h"
#include "yb/rocksdb/db/memtable.h"
#include "yb/rocksdb/memtable/stl_wrappers.h"
#include "yb/rocksdb/port/port.h"
#include "yb/rocksdb/util/mutexlock.h"

namespace rocksdb {
namespace {

using stl_wrappers::Compare;

class VectorRep : public MemTableRep {
 public:
  VectorRep(const KeyComparator& compare, MemTableAllocator* allocator,
            size_t count);

  // Insert key into the collection. (The caller will pack key and value into a
  // single buffer and pass that in as the parameter to Insert.)
  // REQUIRES: nothing that compares equal to key is currently in the
  // collection.
  void Insert(KeyHandle handle) override;

  // Returns true iff an entry that compares equal to key is in the collection.
  bool Contains(const char* key) const override;

  void MarkReadOnly() override;

  size_t ApproximateMemoryUsage() override;

  void Get(const LookupKey& k, void* callback_args,
           bool (*callback_func)(void* arg, const char* entry)) override;

  ~VectorRep() override {}

  class Iterator : public MemTableRep::Iterator {
    class VectorRep* vrep_;
    std::shared_ptr<std::vector<const char*>> bucket_;
    std::vector<const char*>::const_iterator mutable cit_;
    const KeyComparator& compare_;
    std::string tmp_;  // For passing to EncodeKey
    bool mutable sorted_;
    void DoSort() const;

   public:
    explicit Iterator(class VectorRep* vrep,
                      std::shared_ptr<std::vector<const char*>> bucket,
                      const KeyComparator& compare);

    // Initialize an iterator over the specified collection.
    // The returned iterator is not valid.
    // explicit Iterator(const MemTableRep* collection);
    ~Iterator() override {}

    // Returns true iff the iterator is positioned at a valid node.
    bool Valid() const override;

    // Returns the key at the current position.
    // REQUIRES: Valid()
    const char* key() const override;

    // Advances to the next position.
    // REQUIRES: Valid()
    void Next() override;

    // Advances to the previous position.
    // REQUIRES: Valid()
    void Prev() override;

    // Advance to the first entry with a key >= target.
    void Seek(const Slice& user_key, const char* memtable_key) override;

    // Position at the first entry in collection.
    // Final state of iterator is Valid() iff collection is not empty.
    void SeekToFirst() override;

    // Position at the last entry in collection.
    // Final state of iterator is Valid() iff collection is not empty.
    void SeekToLast() override;
  };

  // Return an iterator over the keys in this representation.
  MemTableRep::Iterator* GetIterator(Arena* arena) override;

 private:
  friend class Iterator;
  typedef std::vector<const char*> Bucket;
  std::shared_ptr<Bucket> bucket_;
  mutable port::RWMutex rwlock_;
  bool immutable_;
  bool sorted_;
  const KeyComparator& compare_;
};

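// Insert is append-only: keys are pushed back unsorted, and ordering is
// deferred to Iterator::DoSort(), so writes stay O(1) amortized.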
void VectorRep::Insert(KeyHandle handle) {
  auto* key = static_cast<char*>(handle);
  WriteLock l(&rwlock_);
  assert(!immutable_);
  bucket_->push_back(key);
}

// Returns true iff an entry that compares equal to key is in the collection.
// Note that std::find compares the entry pointers themselves, not the key
// bytes they point to, so this only finds the exact handle that was inserted.
bool VectorRep::Contains(const char* key) const {
  ReadLock l(&rwlock_);
  return std::find(bucket_->begin(), bucket_->end(), key) != bucket_->end();
}

void VectorRep::MarkReadOnly() {
  WriteLock l(&rwlock_);
  immutable_ = true;
}

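// Accounts for the shared_ptr and vector headers plus one pointer per entry.
// The key bytes themselves are allocated from the MemTableAllocator and are
// charged there, not here.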
size_t VectorRep::ApproximateMemoryUsage() {
  return
    sizeof(bucket_) + sizeof(*bucket_) +
    bucket_->size() *
    sizeof(
      std::remove_reference<decltype(*bucket_)>::type::value_type
    );
}

VectorRep::VectorRep(const KeyComparator& compare, MemTableAllocator* allocator,
                     size_t count)
    : MemTableRep(allocator),
      bucket_(new Bucket()),
      immutable_(false),
      sorted_(false),
      compare_(compare) { bucket_->reserve(count); }

VectorRep::Iterator::Iterator(class VectorRep* vrep,
                              std::shared_ptr<std::vector<const char*>> bucket,
                              const KeyComparator& compare)
    : vrep_(vrep),
      bucket_(bucket),
      cit_(bucket_->end()),
      compare_(compare),
      sorted_(false) {}

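// Sorting is lazy: nothing is ordered until the first read operation calls
// DoSort(), after which lookups can binary-search the vector.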
void VectorRep::Iterator::DoSort() const {
  // A non-null vrep_ means we are iterating over an immutable memtable, so
  // the shared bucket itself is sorted in place, under the write lock.
  if (!sorted_ && vrep_ != nullptr) {
    WriteLock l(&vrep_->rwlock_);
    if (!vrep_->sorted_) {
      std::sort(bucket_->begin(), bucket_->end(), Compare(compare_));
      cit_ = bucket_->begin();
      vrep_->sorted_ = true;
    }
    sorted_ = true;
  }
  // Otherwise this iterator owns a private copy of the bucket and can sort
  // it without any locking.
  if (!sorted_) {
    std::sort(bucket_->begin(), bucket_->end(), Compare(compare_));
    cit_ = bucket_->begin();
    sorted_ = true;
  }
  assert(sorted_);
  assert(vrep_ == nullptr || vrep_->sorted_);
}

// Returns true iff the iterator is positioned at a valid node.
bool VectorRep::Iterator::Valid() const {
  DoSort();
  return cit_ != bucket_->end();
}

// Returns the key at the current position.
// REQUIRES: Valid()
const char* VectorRep::Iterator::key() const {
  assert(sorted_);
  return *cit_;
}

// Advances to the next position.
// REQUIRES: Valid()
void VectorRep::Iterator::Next() {
  assert(sorted_);
  if (cit_ == bucket_->end()) {
    return;
  }
  ++cit_;
}

// Advances to the previous position.
// REQUIRES: Valid()
void VectorRep::Iterator::Prev() {
  assert(sorted_);
  if (cit_ == bucket_->begin()) {
    // Stepping back from the first element invalidates the iterator, so we
    // set it to past-the-end; this lets the container be treated circularly.
    cit_ = bucket_->end();
  } else {
    --cit_;
  }
}

// Advance to the first entry with a key >= target.
void VectorRep::Iterator::Seek(const Slice& user_key,
                               const char* memtable_key) {
  DoSort();
  // Binary-search for the first entry that is not less than the target.
  const char* encoded_key =
      (memtable_key != nullptr) ? memtable_key : EncodeKey(&tmp_, user_key);
  cit_ = std::equal_range(bucket_->begin(),
                          bucket_->end(),
                          encoded_key,
                          [this](const char* a, const char* b) {
                            return compare_(a, b) < 0;
                          }).first;
}

// Position at the first entry in collection.
// Final state of iterator is Valid() iff collection is not empty.
void VectorRep::Iterator::SeekToFirst() {
  DoSort();
  cit_ = bucket_->begin();
}

// Position at the last entry in collection.
// Final state of iterator is Valid() iff collection is not empty.
void VectorRep::Iterator::SeekToLast() {
  DoSort();
  cit_ = bucket_->end();
  if (bucket_->size() != 0) {
    --cit_;
  }
}

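// Get holds the read lock only long enough to snapshot the bucket: an
// immutable memtable shares (and sorts) its bucket in place, while a mutable
// one is copied so concurrent inserts are not blocked during the callback
// loop below.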
void VectorRep::Get(const LookupKey& k, void* callback_args,
                    bool (*callback_func)(void* arg, const char* entry)) {
  rwlock_.ReadLock();
  VectorRep* vector_rep;
  std::shared_ptr<Bucket> bucket;
  if (immutable_) {
    vector_rep = this;
  } else {
    vector_rep = nullptr;
    bucket.reset(new Bucket(*bucket_));  // make a copy
  }
  VectorRep::Iterator iter(vector_rep, immutable_ ? bucket_ : bucket, compare_);
  rwlock_.ReadUnlock();

  for (iter.Seek(k.user_key(), k.memtable_key().cdata());
       iter.Valid() && callback_func(callback_args, iter.key()); iter.Next()) {
  }
}

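// As in Get(), a mutable memtable hands the iterator a private copy of the
// bucket, while an immutable one shares the bucket and lets DoSort() order
// it in place.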
MemTableRep::Iterator* VectorRep::GetIterator(Arena* arena) {
  char* mem = nullptr;
  if (arena != nullptr) {
    mem = arena->AllocateAligned(sizeof(Iterator));
  }
  ReadLock l(&rwlock_);
  // Do not sort here. Sorting happens lazily, the first time the returned
  // iterator is positioned (e.g. by a Seek).
  if (immutable_) {
    if (arena == nullptr) {
      return new Iterator(this, bucket_, compare_);
    } else {
      return new (mem) Iterator(this, bucket_, compare_);
    }
  } else {
    std::shared_ptr<Bucket> tmp;
    tmp.reset(new Bucket(*bucket_));  // make a copy
    if (arena == nullptr) {
      return new Iterator(nullptr, tmp, compare_);
    } else {
      return new (mem) Iterator(nullptr, tmp, compare_);
    }
  }
}
}  // anonymous namespace

MemTableRep* VectorRepFactory::CreateMemTableRep(
    const MemTableRep::KeyComparator& compare, MemTableAllocator* allocator,
    const SliceTransform*, Logger* logger) {
  return new VectorRep(compare, allocator, count_);
}
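
// Illustrative usage sketch (not part of this file; VectorRepFactory is
// declared in yb/rocksdb/memtablerep.h): the vector-backed memtable is
// selected by installing the factory on the options, with count passed
// through to VectorRep as the vector's initial reserve() size:
//
//   rocksdb::Options options;
//   options.memtable_factory.reset(new rocksdb::VectorRepFactory(10000));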
}  // namespace rocksdb
#endif  // ROCKSDB_LITE