/Users/deen/code/yugabyte-db/src/yb/docdb/doc_key.cc
Line | Count | Source (jump to first uncovered line) |
1 | | // Copyright (c) YugaByte, Inc. |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except |
4 | | // in compliance with the License. You may obtain a copy of the License at |
5 | | // |
6 | | // http://www.apache.org/licenses/LICENSE-2.0 |
7 | | // |
8 | | // Unless required by applicable law or agreed to in writing, software distributed under the License |
9 | | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express |
10 | | // or implied. See the License for the specific language governing permissions and limitations |
11 | | // under the License. |
12 | | // |
13 | | |
14 | | #include "yb/docdb/doc_key.h" |
15 | | |
16 | | #include <memory> |
17 | | #include <sstream> |
18 | | |
19 | | #include "yb/common/schema.h" |
20 | | |
21 | | #include "yb/docdb/doc_kv_util.h" |
22 | | #include "yb/docdb/doc_path.h" |
23 | | #include "yb/docdb/primitive_value.h" |
24 | | #include "yb/docdb/value_type.h" |
25 | | |
26 | | #include "yb/gutil/strings/substitute.h" |
27 | | |
28 | | #include "yb/util/compare_util.h" |
29 | | #include "yb/util/enums.h" |
30 | | #include "yb/util/result.h" |
31 | | #include "yb/util/status_format.h" |
32 | | #include "yb/util/string_util.h" |
33 | | #include "yb/util/tostring.h" |
34 | | #include "yb/util/uuid.h" |
35 | | |
36 | | using std::ostringstream; |
37 | | |
38 | | using strings::Substitute; |
39 | | |
40 | | using yb::util::CompareVectors; |
41 | | using yb::util::CompareUsingLessThan; |
42 | | |
43 | | namespace yb { |
44 | | namespace docdb { |
45 | | |
46 | | namespace { |
47 | | |
48 | | // Checks whether slice starts with primitive value. |
49 | | // Valid cases are end of group or primitive value starting with value type. |
50 | 2.71G | Result<bool> HasPrimitiveValue(Slice* slice, AllowSpecial allow_special) { |
51 | 2.71G | if (PREDICT_FALSE(slice->empty())) { |
52 | 136 | return STATUS(Corruption, "Unexpected end of key when decoding document key"); |
53 | 136 | } |
54 | 2.71G | ValueType current_value_type = static_cast<ValueType>(*slice->data()); |
55 | 2.71G | if (current_value_type == ValueType::kGroupEnd) { |
56 | 1.26G | slice->consume_byte(); |
57 | 1.26G | return false; |
58 | 1.26G | } |
59 | | |
60 | 1.44G | if (1.44G IsPrimitiveValueType(current_value_type)1.44G ) { |
61 | 1.44G | return true; |
62 | 1.44G | } |
63 | | |
64 | 18.4E | if (allow_special && IsSpecialValueType(current_value_type)449k ) { |
65 | 449k | return true; |
66 | 449k | } |
67 | | |
68 | 18.4E | return STATUS_FORMAT(Corruption, "Expected a primitive value type, got $0", current_value_type); |
69 | 18.4E | } |
70 | | |
71 | | constexpr auto kNumValuesNoLimit = std::numeric_limits<int>::max(); |
72 | | |
73 | | // Consumes up to n_values_limit primitive values from key until group end is found. |
74 | | // Callback is called for each value and responsible for consuming this single value from slice. |
75 | | template<class Callback> |
76 | | Status ConsumePrimitiveValuesFromKey( |
77 | | Slice* slice, AllowSpecial allow_special, Callback callback, |
78 | 1.18G | int n_values_limit = kNumValuesNoLimit) { |
79 | 1.18G | const auto initial_slice(*slice); // For error reporting. |
80 | 2.54G | for (; n_values_limit > 0; --n_values_limit1.35G ) { |
81 | 2.51G | if (!VERIFY_RESULT(HasPrimitiveValue(slice, allow_special))) { |
82 | 1.15G | return Status::OK(); |
83 | 1.15G | } |
84 | | |
85 | 1.36G | RETURN_NOT_OK_PREPEND(callback(), |
86 | 1.36G | Substitute("while consuming primitive values from $0", |
87 | 1.36G | initial_slice.ToDebugHexString())); |
88 | 1.35G | } |
89 | 26.6M | return Status::OK(); |
90 | 1.18G | } doc_key.cc:yb::Status yb::docdb::(anonymous namespace)::ConsumePrimitiveValuesFromKey<yb::docdb::(anonymous namespace)::ConsumePrimitiveValuesFromKey(yb::Slice*, yb::StronglyTypedBool<yb::docdb::AllowSpecial_Tag>, std::__1::vector<yb::docdb::PrimitiveValue, std::__1::allocator<yb::docdb::PrimitiveValue> >*, int)::$_1>(yb::Slice*, yb::StronglyTypedBool<yb::docdb::AllowSpecial_Tag>, yb::docdb::(anonymous namespace)::ConsumePrimitiveValuesFromKey(yb::Slice*, yb::StronglyTypedBool<yb::docdb::AllowSpecial_Tag>, std::__1::vector<yb::docdb::PrimitiveValue, std::__1::allocator<yb::docdb::PrimitiveValue> >*, int)::$_1, int) Line | Count | Source | 78 | 560M | int n_values_limit = kNumValuesNoLimit) { | 79 | 560M | const auto initial_slice(*slice); // For error reporting. | 80 | 1.22G | for (; n_values_limit > 01.22G ; --n_values_limit664M ) { | 81 | 1.22G | if (!VERIFY_RESULT(HasPrimitiveValue(slice, allow_special))) { | 82 | 560M | return Status::OK(); | 83 | 560M | } | 84 | | | 85 | 664M | RETURN_NOT_OK_PREPEND(callback(), | 86 | 664M | Substitute("while consuming primitive values from $0", | 87 | 664M | initial_slice.ToDebugHexString())); | 88 | 664M | } | 89 | 18.4E | return Status::OK(); | 90 | 560M | } |
doc_key.cc:yb::Status yb::docdb::(anonymous namespace)::ConsumePrimitiveValuesFromKey<yb::docdb::(anonymous namespace)::ConsumePrimitiveValuesFromKey(yb::Slice*, yb::StronglyTypedBool<yb::docdb::AllowSpecial_Tag>, boost::container::small_vector_base<yb::Slice, void, void>*, int)::$_0>(yb::Slice*, yb::StronglyTypedBool<yb::docdb::AllowSpecial_Tag>, yb::docdb::(anonymous namespace)::ConsumePrimitiveValuesFromKey(yb::Slice*, yb::StronglyTypedBool<yb::docdb::AllowSpecial_Tag>, boost::container::small_vector_base<yb::Slice, void, void>*, int)::$_0, int) Line | Count | Source | 78 | 624M | int n_values_limit = kNumValuesNoLimit) { | 79 | 624M | const auto initial_slice(*slice); // For error reporting. | 80 | 1.31G | for (; n_values_limit > 0; --n_values_limit695M ) { | 81 | 1.29G | if (!VERIFY_RESULT(HasPrimitiveValue(slice, allow_special))) { | 82 | 596M | return Status::OK(); | 83 | 596M | } | 84 | | | 85 | 695M | RETURN_NOT_OK_PREPEND(callback(), | 86 | 695M | Substitute("while consuming primitive values from $0", | 87 | 695M | initial_slice.ToDebugHexString())); | 88 | 695M | } | 89 | 26.6M | return Status::OK(); | 90 | 624M | } |
|
91 | | |
92 | | Status ConsumePrimitiveValuesFromKey(Slice* slice, AllowSpecial allow_special, |
93 | | boost::container::small_vector_base<Slice>* result, |
94 | 624M | int n_values_limit = kNumValuesNoLimit) { |
95 | 696M | return ConsumePrimitiveValuesFromKey(slice, allow_special, [slice, result]() -> Status { |
96 | 696M | auto begin = slice->data(); |
97 | 696M | RETURN_NOT_OK(PrimitiveValue::DecodeKey(slice, /* out */ nullptr)); |
98 | 695M | if (result) { |
99 | 58.1M | result->emplace_back(begin, slice->data()); |
100 | 58.1M | } |
101 | 695M | return Status::OK(); |
102 | 696M | }, n_values_limit); |
103 | 624M | } |
104 | | |
105 | | Status ConsumePrimitiveValuesFromKey( |
106 | | Slice* slice, AllowSpecial allow_special, std::vector<PrimitiveValue>* result, |
107 | 560M | int n_values_limit = kNumValuesNoLimit) { |
108 | 665M | return ConsumePrimitiveValuesFromKey(slice, allow_special, [slice, result] { |
109 | 665M | result->emplace_back(); |
110 | 665M | return result->back().DecodeFromKey(slice); |
111 | 665M | }, n_values_limit); |
112 | 560M | } |
113 | | |
114 | | } // namespace |
115 | | |
116 | 150M | Result<bool> ConsumePrimitiveValueFromKey(Slice* slice) { |
117 | 150M | if (!VERIFY_RESULT(HasPrimitiveValue(slice, AllowSpecial::kFalse))) { |
118 | 89.0M | return false; |
119 | 89.0M | } |
120 | 61.4M | RETURN_NOT_OK(PrimitiveValue::DecodeKey(slice, nullptr /* out */)); |
121 | 61.4M | return true; |
122 | 61.4M | } |
123 | | |
124 | 0 | Status ConsumePrimitiveValuesFromKey(Slice* slice, std::vector<PrimitiveValue>* result) { |
125 | 0 | return ConsumePrimitiveValuesFromKey(slice, AllowSpecial::kFalse, result); |
126 | 0 | } |
127 | | |
128 | | // ------------------------------------------------------------------------------------------------ |
129 | | // DocKey |
130 | | // ------------------------------------------------------------------------------------------------ |
131 | | |
132 | | DocKey::DocKey() |
133 | | : cotable_id_(Uuid::Nil()), |
134 | | colocation_id_(kColocationIdNotSet), |
135 | | hash_present_(false), |
136 | 450M | hash_(0) { |
137 | 450M | } |
138 | | |
139 | | DocKey::DocKey(std::vector<PrimitiveValue> range_components) |
140 | | : cotable_id_(Uuid::Nil()), |
141 | | colocation_id_(kColocationIdNotSet), |
142 | | hash_present_(false), |
143 | | hash_(0), |
144 | 4.54M | range_group_(std::move(range_components)) { |
145 | 4.54M | } |
146 | | |
147 | | DocKey::DocKey(DocKeyHash hash, |
148 | | std::vector<PrimitiveValue> hashed_components, |
149 | | std::vector<PrimitiveValue> range_components) |
150 | | : cotable_id_(Uuid::Nil()), |
151 | | colocation_id_(kColocationIdNotSet), |
152 | | hash_present_(true), |
153 | | hash_(hash), |
154 | | hashed_group_(std::move(hashed_components)), |
155 | 10.5M | range_group_(std::move(range_components)) { |
156 | 10.5M | } |
157 | | |
158 | | DocKey::DocKey(const Uuid& cotable_id, |
159 | | DocKeyHash hash, |
160 | | std::vector<PrimitiveValue> hashed_components, |
161 | | std::vector<PrimitiveValue> range_components) |
162 | | : cotable_id_(cotable_id), |
163 | | colocation_id_(kColocationIdNotSet), |
164 | | hash_present_(true), |
165 | | hash_(hash), |
166 | | hashed_group_(std::move(hashed_components)), |
167 | 192 | range_group_(std::move(range_components)) { |
168 | 192 | } |
169 | | |
170 | | DocKey::DocKey(const ColocationId colocation_id, |
171 | | DocKeyHash hash, |
172 | | std::vector<PrimitiveValue> hashed_components, |
173 | | std::vector<PrimitiveValue> range_components) |
174 | | : cotable_id_(Uuid::Nil()), |
175 | | colocation_id_(colocation_id), |
176 | | hash_present_(true), |
177 | | hash_(hash), |
178 | | hashed_group_(std::move(hashed_components)), |
179 | 384 | range_group_(std::move(range_components)) { |
180 | 384 | } |
181 | | |
182 | | DocKey::DocKey(const Uuid& cotable_id) |
183 | | : cotable_id_(cotable_id), |
184 | | colocation_id_(kColocationIdNotSet), |
185 | | hash_present_(false), |
186 | 0 | hash_(0) { |
187 | 0 | } |
188 | | |
189 | | DocKey::DocKey(const ColocationId colocation_id) |
190 | | : cotable_id_(Uuid::Nil()), |
191 | | colocation_id_(colocation_id), |
192 | | hash_present_(false), |
193 | 3 | hash_(0) { |
194 | 3 | } |
195 | | |
196 | | DocKey::DocKey(const Schema& schema) |
197 | | : cotable_id_(schema.cotable_id()), |
198 | | colocation_id_(schema.colocation_id()), |
199 | | hash_present_(false), |
200 | 13.4M | hash_(0) { |
201 | 13.4M | } |
202 | | |
203 | | DocKey::DocKey(const Schema& schema, DocKeyHash hash) |
204 | | : cotable_id_(schema.cotable_id()), |
205 | | colocation_id_(schema.colocation_id()), |
206 | | hash_present_(true), |
207 | 0 | hash_(hash) { |
208 | 0 | } |
209 | | |
210 | | DocKey::DocKey(const Schema& schema, std::vector<PrimitiveValue> range_components) |
211 | | : cotable_id_(schema.cotable_id()), |
212 | | colocation_id_(schema.colocation_id()), |
213 | | hash_present_(false), |
214 | | hash_(0), |
215 | 7.20M | range_group_(std::move(range_components)) { |
216 | 7.20M | } |
217 | | |
218 | | DocKey::DocKey(const Schema& schema, DocKeyHash hash, |
219 | | std::vector<PrimitiveValue> hashed_components, |
220 | | std::vector<PrimitiveValue> range_components) |
221 | | : cotable_id_(schema.cotable_id()), |
222 | | colocation_id_(schema.colocation_id()), |
223 | | hash_present_(true), |
224 | | hash_(hash), |
225 | | hashed_group_(std::move(hashed_components)), |
226 | 3.44M | range_group_(std::move(range_components)) { |
227 | 3.44M | } |
228 | | |
229 | 32.4M | KeyBytes DocKey::Encode() const { |
230 | 32.4M | KeyBytes result; |
231 | 32.4M | AppendTo(&result); |
232 | 32.4M | return result; |
233 | 32.4M | } |
234 | | |
235 | | namespace { |
236 | | |
237 | | // Used as cache of allocated memory by EncodeAsRefCntPrefix. |
238 | | thread_local boost::optional<KeyBytes> thread_local_encode_buffer; |
239 | | |
240 | | } |
241 | | |
242 | 17.6M | RefCntPrefix DocKey::EncodeAsRefCntPrefix() const { |
243 | 17.6M | KeyBytes* encode_buffer = thread_local_encode_buffer.get_ptr(); |
244 | 17.6M | if (!encode_buffer) { |
245 | 52.8k | thread_local_encode_buffer.emplace(); |
246 | 52.8k | encode_buffer = thread_local_encode_buffer.get_ptr(); |
247 | 52.8k | } |
248 | 17.6M | encode_buffer->Clear(); |
249 | 17.6M | AppendTo(encode_buffer); |
250 | 17.6M | return RefCntPrefix(encode_buffer->AsSlice()); |
251 | 17.6M | } |
252 | | |
253 | 50.0M | void DocKey::AppendTo(KeyBytes* out) const { |
254 | 50.0M | auto encoder = DocKeyEncoder(out); |
255 | 50.0M | if (!cotable_id_.IsNil()) { |
256 | 9.98M | encoder.CotableId(cotable_id_).Hash(hash_present_, hash_, hashed_group_).Range(range_group_); |
257 | 40.0M | } else { |
258 | 40.0M | encoder.ColocationId(colocation_id_) |
259 | 40.0M | .Hash(hash_present_, hash_, hashed_group_) |
260 | 40.0M | .Range(range_group_); |
261 | 40.0M | } |
262 | 50.0M | } |
263 | | |
264 | 842M | void DocKey::Clear() { |
265 | 842M | hash_present_ = false; |
266 | 842M | hash_ = 0xdead; |
267 | 842M | hashed_group_.clear(); |
268 | 842M | range_group_.clear(); |
269 | 842M | } |
270 | | |
271 | 0 | void DocKey::ClearRangeComponents() { |
272 | 0 | range_group_.clear(); |
273 | 0 | } |
274 | | |
275 | 12 | void DocKey::ResizeRangeComponents(int new_size) { |
276 | 12 | range_group_.resize(new_size); |
277 | 12 | } |
278 | | |
279 | | namespace { |
280 | | |
281 | | class DecodeDocKeyCallback { |
282 | | public: |
283 | 92.9M | explicit DecodeDocKeyCallback(boost::container::small_vector_base<Slice>* out) : out_(out) {} |
284 | | |
285 | 68.6M | boost::container::small_vector_base<Slice>* hashed_group() const { |
286 | 68.6M | return nullptr; |
287 | 68.6M | } |
288 | | |
289 | 93.0M | boost::container::small_vector_base<Slice>* range_group() const { |
290 | 93.0M | return out_; |
291 | 93.0M | } |
292 | | |
293 | 93.0M | void SetHash(...) const {} |
294 | | |
295 | 19.5M | void SetCoTableId(const Uuid cotable_id) const {} |
296 | | |
297 | 27 | void SetColocationId(const ColocationId colocation_id) const {} |
298 | | |
299 | | private: |
300 | | boost::container::small_vector_base<Slice>* out_; |
301 | | }; |
302 | | |
303 | | class DummyCallback { |
304 | | public: |
305 | 203M | boost::container::small_vector_base<Slice>* hashed_group() const { |
306 | 203M | return nullptr; |
307 | 203M | } |
308 | | |
309 | 119M | boost::container::small_vector_base<Slice>* range_group() const { |
310 | 119M | return nullptr; |
311 | 119M | } |
312 | | |
313 | 305M | void SetHash(...) const {} |
314 | | |
315 | 62.8M | void SetCoTableId(const Uuid cotable_id) const {} |
316 | | |
317 | 2.59k | void SetColocationId(const ColocationId colocation_id) const {} |
318 | | |
319 | 102M | PrimitiveValue* AddSubkey() const { |
320 | 102M | return nullptr; |
321 | 102M | } |
322 | | }; |
323 | | |
324 | | class EncodedSizesCallback { |
325 | | public: |
326 | 93.8M | explicit EncodedSizesCallback(DocKeyDecoder* decoder) : decoder_(decoder) {} |
327 | | |
328 | 45.8M | boost::container::small_vector_base<Slice>* hashed_group() const { |
329 | 45.8M | return nullptr; |
330 | 45.8M | } |
331 | | |
332 | 93.7M | boost::container::small_vector_base<Slice>* range_group() const { |
333 | 93.7M | range_group_start_ = decoder_->left_input().data(); |
334 | 93.7M | return nullptr; |
335 | 93.7M | } |
336 | | |
337 | 93.7M | void SetHash(...) const {} |
338 | | |
339 | 43.9M | void SetCoTableId(const Uuid cotable_id) const {} |
340 | | |
341 | 472 | void SetColocationId(const ColocationId colocation_id) const {} |
342 | | |
343 | 0 | PrimitiveValue* AddSubkey() const { |
344 | 0 | return nullptr; |
345 | 0 | } |
346 | | |
347 | 93.8M | const uint8_t* range_group_start() { |
348 | 93.8M | return range_group_start_; |
349 | 93.8M | } |
350 | | |
351 | | private: |
352 | | DocKeyDecoder* decoder_; |
353 | | mutable const uint8_t* range_group_start_ = nullptr; |
354 | | }; |
355 | | |
356 | | } // namespace |
357 | | |
358 | | yb::Status DocKey::PartiallyDecode(Slice *slice, |
359 | 92.9M | boost::container::small_vector_base<Slice>* out) { |
360 | 92.9M | CHECK_NOTNULL(out); |
361 | 92.9M | DocKeyDecoder decoder(*slice); |
362 | 92.9M | RETURN_NOT_OK(DoDecode( |
363 | 92.9M | &decoder, DocKeyPart::kWholeDocKey, AllowSpecial::kFalse, DecodeDocKeyCallback(out))); |
364 | 92.9M | *slice = decoder.left_input(); |
365 | 92.9M | return Status::OK(); |
366 | 92.9M | } |
367 | | |
368 | 6.64M | Result<DocKeyHash> DocKey::DecodeHash(const Slice& slice) { |
369 | 6.64M | DocKeyDecoder decoder(slice); |
370 | 6.64M | RETURN_NOT_OK(decoder.DecodeCotableId()); |
371 | 6.64M | RETURN_NOT_OK(decoder.DecodeColocationId()); |
372 | 6.64M | uint16_t hash; |
373 | 6.64M | RETURN_NOT_OK(decoder.DecodeHashCode(&hash)); |
374 | 6.64M | return hash; |
375 | 6.64M | } |
376 | | |
377 | 304M | Result<size_t> DocKey::EncodedSize(Slice slice, DocKeyPart part, AllowSpecial allow_special) { |
378 | 304M | auto initial_begin = slice.cdata(); |
379 | 304M | DocKeyDecoder decoder(slice); |
380 | 304M | RETURN_NOT_OK(DoDecode(&decoder, part, allow_special, DummyCallback())); |
381 | 304M | return decoder.left_input().cdata() - initial_begin; |
382 | 304M | } |
383 | | |
384 | 10 | Result<std::pair<size_t, bool>> DocKey::EncodedSizeAndHashPresent(Slice slice, DocKeyPart part) { |
385 | 10 | class HashPresenceAwareDummyCallback : public DummyCallback { |
386 | 10 | public: |
387 | 10 | explicit HashPresenceAwareDummyCallback(bool* hash_present) : hash_present_(hash_present) {} |
388 | | |
389 | 10 | void SetHash(const bool hash_present, const DocKeyHash hash = 0) const { |
390 | 10 | *hash_present_ = hash_present; |
391 | 10 | } |
392 | | |
393 | 10 | private: |
394 | 10 | bool* hash_present_; |
395 | 10 | }; |
396 | | |
397 | 10 | auto initial_begin = slice.cdata(); |
398 | 10 | DocKeyDecoder decoder(slice); |
399 | 10 | bool hash_present = false; |
400 | 10 | HashPresenceAwareDummyCallback callback(&hash_present); |
401 | 10 | RETURN_NOT_OK(DoDecode(&decoder, part, AllowSpecial::kFalse, callback)); |
402 | | // TODO: left_input() should be called remaining_input(). |
403 | 10 | return std::make_pair(decoder.left_input().cdata() - initial_begin, hash_present); |
404 | 10 | } |
405 | | |
406 | | Result<std::pair<size_t, size_t>> DocKey::EncodedHashPartAndDocKeySizes( |
407 | | Slice slice, |
408 | 93.8M | AllowSpecial allow_special) { |
409 | 93.8M | auto initial_begin = slice.data(); |
410 | 93.8M | DocKeyDecoder decoder(slice); |
411 | 93.8M | EncodedSizesCallback callback(&decoder); |
412 | 93.8M | RETURN_NOT_OK(DoDecode( |
413 | 93.8M | &decoder, DocKeyPart::kWholeDocKey, allow_special, callback)); |
414 | 93.8M | return std::make_pair(callback.range_group_start() - initial_begin, |
415 | 93.8M | decoder.left_input().data() - initial_begin); |
416 | 93.8M | } |
417 | | |
418 | | class DocKey::DecodeFromCallback { |
419 | | public: |
420 | 438M | explicit DecodeFromCallback(DocKey* key) : key_(key) { |
421 | 438M | } |
422 | | |
423 | 144M | std::vector<PrimitiveValue>* hashed_group() const { |
424 | 144M | return &key_->hashed_group_; |
425 | 144M | } |
426 | | |
427 | 417M | std::vector<PrimitiveValue>* range_group() const { |
428 | 417M | return &key_->range_group_; |
429 | 417M | } |
430 | | |
431 | 438M | void SetHash(bool present, DocKeyHash hash = 0) const { |
432 | 438M | key_->hash_present_ = present; |
433 | 438M | if (present) { |
434 | 160M | key_->hash_ = hash; |
435 | 160M | } |
436 | 438M | } |
437 | 263M | void SetCoTableId(const Uuid cotable_id) const { |
438 | 263M | key_->cotable_id_ = cotable_id; |
439 | 263M | } |
440 | | |
441 | 2.59k | void SetColocationId(const ColocationId colocation_id) const { |
442 | 2.59k | key_->colocation_id_ = colocation_id; |
443 | 2.59k | } |
444 | | |
445 | | private: |
446 | | DocKey* key_; |
447 | | }; |
448 | | |
449 | 438M | Status DocKey::DecodeFrom(Slice *slice, DocKeyPart part_to_decode, AllowSpecial allow_special) { |
450 | 438M | Clear(); |
451 | 438M | DocKeyDecoder decoder(*slice); |
452 | 438M | RETURN_NOT_OK(DoDecode(&decoder, part_to_decode, allow_special, DecodeFromCallback(this))); |
453 | 438M | *slice = decoder.left_input(); |
454 | 438M | return Status::OK(); |
455 | 438M | } |
456 | | |
457 | | Result<size_t> DocKey::DecodeFrom( |
458 | 28.9M | const Slice& slice, DocKeyPart part_to_decode, AllowSpecial allow_special) { |
459 | 28.9M | Slice copy = slice; |
460 | 28.9M | RETURN_NOT_OK(DecodeFrom(©, part_to_decode, allow_special)); |
461 | 28.9M | return slice.size() - copy.size(); |
462 | 28.9M | } |
463 | | |
464 | | namespace { |
465 | | |
466 | | // Return limit on number of range components to decode based on part_to_decode and whether hash |
467 | | // component are present in key (hash_present). |
468 | 913M | int MaxRangeComponentsToDecode(const DocKeyPart part_to_decode, const bool hash_present) { |
469 | 913M | switch (part_to_decode) { |
470 | 0 | case DocKeyPart::kUpToId: |
471 | 0 | LOG(FATAL) << "Internal error: unexpected to have DocKeyPart::kUpToId here"; |
472 | 694M | case DocKeyPart::kWholeDocKey: |
473 | 694M | return kNumValuesNoLimit; |
474 | 4.63M | case DocKeyPart::kUpToHashCode: FALLTHROUGH_INTENDED; |
475 | 109M | case DocKeyPart::kUpToHash: |
476 | 109M | return 0; |
477 | 109M | case DocKeyPart::kUpToHashOrFirstRange: |
478 | 109M | return hash_present ? 080.4M : 128.6M ; |
479 | 913M | } |
480 | 0 | FATAL_INVALID_ENUM_VALUE(DocKeyPart, part_to_decode); |
481 | 0 | } |
482 | | |
483 | | } // namespace |
484 | | |
485 | | template<class Callback> |
486 | | yb::Status DocKey::DoDecode(DocKeyDecoder* decoder, |
487 | | DocKeyPart part_to_decode, |
488 | | AllowSpecial allow_special, |
489 | 930M | const Callback& callback) { |
490 | 930M | Uuid cotable_id; |
491 | 930M | ColocationId colocation_id; |
492 | 930M | if (VERIFY_RESULT(decoder->DecodeCotableId(&cotable_id))) { |
493 | 389M | callback.SetCoTableId(cotable_id); |
494 | 541M | } else if (VERIFY_RESULT(decoder->DecodeColocationId(&colocation_id))) { |
495 | 5.69k | callback.SetColocationId(colocation_id); |
496 | 5.69k | } |
497 | | |
498 | 0 | switch (part_to_decode) { |
499 | 1.63k | case DocKeyPart::kUpToId: |
500 | 1.63k | return Status::OK(); |
501 | 21.5M | case DocKeyPart::kUpToHashCode: FALLTHROUGH_INTENDED; |
502 | 126M | case DocKeyPart::kUpToHash: FALLTHROUGH_INTENDED; |
503 | 235M | case DocKeyPart::kUpToHashOrFirstRange: FALLTHROUGH_INTENDED; |
504 | 930M | case DocKeyPart::kWholeDocKey: |
505 | 930M | uint16_t hash_code; |
506 | 930M | const auto hash_present = VERIFY_RESULT930M (decoder->DecodeHashCode(&hash_code, allow_special));930M |
507 | 930M | if (hash_present) { |
508 | 479M | callback.SetHash(/* present */ true, hash_code); |
509 | 479M | if (part_to_decode == DocKeyPart::kUpToHashCode) { |
510 | 16.8M | return Status::OK(); |
511 | 16.8M | } |
512 | 462M | RETURN_NOT_OK_PREPEND( |
513 | 462M | ConsumePrimitiveValuesFromKey( |
514 | 462M | decoder->mutable_input(), allow_special, callback.hashed_group()), |
515 | 462M | "Error when decoding hashed components of a document key"); |
516 | 462M | } else { |
517 | 451M | callback.SetHash(/* present */ false); |
518 | 451M | } |
519 | 913M | if (decoder->left_input().empty()) { |
520 | 26 | return Status::OK(); |
521 | 26 | } |
522 | | // The rest are range components. |
523 | 913M | const auto max_components_to_decode = |
524 | 913M | MaxRangeComponentsToDecode(part_to_decode, hash_present); |
525 | 913M | if (max_components_to_decode > 0) { |
526 | 723M | RETURN_NOT_OK_PREPEND( |
527 | 723M | ConsumePrimitiveValuesFromKey( |
528 | 723M | decoder->mutable_input(), allow_special, callback.range_group(), |
529 | 723M | max_components_to_decode), |
530 | 723M | "Error when decoding range components of a document key"); |
531 | 722M | } |
532 | 913M | return Status::OK(); |
533 | 930M | } |
534 | 0 | FATAL_INVALID_ENUM_VALUE(DocKeyPart, part_to_decode); |
535 | 0 | } doc_key.cc:yb::Status yb::docdb::DocKey::DoDecode<yb::docdb::(anonymous namespace)::DecodeDocKeyCallback>(yb::docdb::DocKeyDecoder*, yb::docdb::DocKeyPart, yb::StronglyTypedBool<yb::docdb::AllowSpecial_Tag>, yb::docdb::(anonymous namespace)::DecodeDocKeyCallback const&) Line | Count | Source | 489 | 93.0M | const Callback& callback) { | 490 | 93.0M | Uuid cotable_id; | 491 | 93.0M | ColocationId colocation_id; | 492 | 93.0M | if (VERIFY_RESULT(decoder->DecodeCotableId(&cotable_id))) { | 493 | 19.5M | callback.SetCoTableId(cotable_id); | 494 | 73.4M | } else if (VERIFY_RESULT(decoder->DecodeColocationId(&colocation_id))) { | 495 | 27 | callback.SetColocationId(colocation_id); | 496 | 27 | } | 497 | | | 498 | 0 | switch (part_to_decode) { | 499 | 0 | case DocKeyPart::kUpToId: | 500 | 0 | return Status::OK(); | 501 | 0 | case DocKeyPart::kUpToHashCode: FALLTHROUGH_INTENDED; | 502 | 0 | case DocKeyPart::kUpToHash: FALLTHROUGH_INTENDED; | 503 | 0 | case DocKeyPart::kUpToHashOrFirstRange: FALLTHROUGH_INTENDED; | 504 | 93.0M | case DocKeyPart::kWholeDocKey: | 505 | 93.0M | uint16_t hash_code; | 506 | 93.0M | const auto hash_present = VERIFY_RESULT(decoder->DecodeHashCode(&hash_code, allow_special)); | 507 | 93.0M | if (hash_present) { | 508 | 68.6M | callback.SetHash(/* present */ true, hash_code); | 509 | 68.6M | if (part_to_decode == DocKeyPart::kUpToHashCode) { | 510 | 0 | return Status::OK(); | 511 | 0 | } | 512 | 68.6M | RETURN_NOT_OK_PREPEND( | 513 | 68.6M | ConsumePrimitiveValuesFromKey( | 514 | 68.6M | decoder->mutable_input(), allow_special, callback.hashed_group()), | 515 | 68.6M | "Error when decoding hashed components of a document key"); | 516 | 68.6M | } else { | 517 | 24.3M | callback.SetHash(/* present */ false); | 518 | 24.3M | } | 519 | 93.0M | if (decoder->left_input().empty()) { | 520 | 0 | return Status::OK(); | 521 | 0 | } | 522 | | // The rest are range components. 
| 523 | 93.0M | const auto max_components_to_decode = | 524 | 93.0M | MaxRangeComponentsToDecode(part_to_decode, hash_present); | 525 | 93.0M | if (max_components_to_decode > 093.0M ) { | 526 | 93.0M | RETURN_NOT_OK_PREPEND( | 527 | 93.0M | ConsumePrimitiveValuesFromKey( | 528 | 93.0M | decoder->mutable_input(), allow_special, callback.range_group(), | 529 | 93.0M | max_components_to_decode), | 530 | 93.0M | "Error when decoding range components of a document key"); | 531 | 93.0M | } | 532 | 93.0M | return Status::OK(); | 533 | 93.0M | } | 534 | 0 | FATAL_INVALID_ENUM_VALUE(DocKeyPart, part_to_decode); | 535 | 0 | } |
doc_key.cc:yb::Status yb::docdb::DocKey::DoDecode<yb::docdb::(anonymous namespace)::DummyCallback>(yb::docdb::DocKeyDecoder*, yb::docdb::DocKeyPart, yb::StronglyTypedBool<yb::docdb::AllowSpecial_Tag>, yb::docdb::(anonymous namespace)::DummyCallback const&) Line | Count | Source | 489 | 304M | const Callback& callback) { | 490 | 304M | Uuid cotable_id; | 491 | 304M | ColocationId colocation_id; | 492 | 304M | if (VERIFY_RESULT(decoder->DecodeCotableId(&cotable_id))) { | 493 | 62.8M | callback.SetCoTableId(cotable_id); | 494 | 242M | } else if (VERIFY_RESULT(decoder->DecodeColocationId(&colocation_id))) { | 495 | 2.59k | callback.SetColocationId(colocation_id); | 496 | 2.59k | } | 497 | | | 498 | 0 | switch (part_to_decode) { | 499 | 1.46k | case DocKeyPart::kUpToId: | 500 | 1.46k | return Status::OK(); | 501 | 143 | case DocKeyPart::kUpToHashCode: FALLTHROUGH_INTENDED; | 502 | 105M | case DocKeyPart::kUpToHash: FALLTHROUGH_INTENDED; | 503 | 214M | case DocKeyPart::kUpToHashOrFirstRange: FALLTHROUGH_INTENDED; | 504 | 305M | case DocKeyPart::kWholeDocKey: | 505 | 305M | uint16_t hash_code; | 506 | 305M | const auto hash_present = VERIFY_RESULT(decoder->DecodeHashCode(&hash_code, allow_special)); | 507 | 305M | if (hash_present) { | 508 | 203M | callback.SetHash(/* present */ true, hash_code); | 509 | 203M | if (part_to_decode == DocKeyPart::kUpToHashCode) { | 510 | 143 | return Status::OK(); | 511 | 143 | } | 512 | 203M | RETURN_NOT_OK_PREPEND( | 513 | 203M | ConsumePrimitiveValuesFromKey( | 514 | 203M | decoder->mutable_input(), allow_special, callback.hashed_group()), | 515 | 203M | "Error when decoding hashed components of a document key"); | 516 | 203M | } else { | 517 | 101M | callback.SetHash(/* present */ false); | 518 | 101M | } | 519 | 305M | if (decoder->left_input().empty()) { | 520 | 26 | return Status::OK(); | 521 | 26 | } | 522 | | // The rest are range components. 
| 523 | 305M | const auto max_components_to_decode = | 524 | 305M | MaxRangeComponentsToDecode(part_to_decode, hash_present); | 525 | 305M | if (max_components_to_decode > 0) { | 526 | 119M | RETURN_NOT_OK_PREPEND( | 527 | 119M | ConsumePrimitiveValuesFromKey( | 528 | 119M | decoder->mutable_input(), allow_special, callback.range_group(), | 529 | 119M | max_components_to_decode), | 530 | 119M | "Error when decoding range components of a document key"); | 531 | 119M | } | 532 | 304M | return Status::OK(); | 533 | 304M | } | 534 | 0 | FATAL_INVALID_ENUM_VALUE(DocKeyPart, part_to_decode); | 535 | 0 | } |
doc_key.cc:yb::Status yb::docdb::DocKey::DoDecode<yb::docdb::DocKey::EncodedSizeAndHashPresent(yb::Slice, yb::docdb::DocKeyPart)::HashPresenceAwareDummyCallback>(yb::docdb::DocKeyDecoder*, yb::docdb::DocKeyPart, yb::StronglyTypedBool<yb::docdb::AllowSpecial_Tag>, yb::docdb::DocKey::EncodedSizeAndHashPresent(yb::Slice, yb::docdb::DocKeyPart)::HashPresenceAwareDummyCallback const&) Line | Count | Source | 489 | 10 | const Callback& callback) { | 490 | 10 | Uuid cotable_id; | 491 | 10 | ColocationId colocation_id; | 492 | 10 | if (VERIFY_RESULT(decoder->DecodeCotableId(&cotable_id))) { | 493 | 0 | callback.SetCoTableId(cotable_id); | 494 | 10 | } else if (VERIFY_RESULT(decoder->DecodeColocationId(&colocation_id))) { | 495 | 0 | callback.SetColocationId(colocation_id); | 496 | 0 | } | 497 | | | 498 | 0 | switch (part_to_decode) { | 499 | 0 | case DocKeyPart::kUpToId: | 500 | 0 | return Status::OK(); | 501 | 0 | case DocKeyPart::kUpToHashCode: FALLTHROUGH_INTENDED; | 502 | 10 | case DocKeyPart::kUpToHash: FALLTHROUGH_INTENDED; | 503 | 10 | case DocKeyPart::kUpToHashOrFirstRange: FALLTHROUGH_INTENDED; | 504 | 10 | case DocKeyPart::kWholeDocKey: | 505 | 10 | uint16_t hash_code; | 506 | 10 | const auto hash_present = VERIFY_RESULT(decoder->DecodeHashCode(&hash_code, allow_special)); | 507 | 10 | if (hash_present) { | 508 | 10 | callback.SetHash(/* present */ true, hash_code); | 509 | 10 | if (part_to_decode == DocKeyPart::kUpToHashCode) { | 510 | 0 | return Status::OK(); | 511 | 0 | } | 512 | 10 | RETURN_NOT_OK_PREPEND( | 513 | 10 | ConsumePrimitiveValuesFromKey( | 514 | 10 | decoder->mutable_input(), allow_special, callback.hashed_group()), | 515 | 10 | "Error when decoding hashed components of a document key"); | 516 | 10 | } else { | 517 | 0 | callback.SetHash(/* present */ false); | 518 | 0 | } | 519 | 10 | if (decoder->left_input().empty()) { | 520 | 0 | return Status::OK(); | 521 | 0 | } | 522 | | // The rest are range components. 
| 523 | 10 | const auto max_components_to_decode = | 524 | 10 | MaxRangeComponentsToDecode(part_to_decode, hash_present); | 525 | 10 | if (max_components_to_decode > 0) { | 526 | 0 | RETURN_NOT_OK_PREPEND( | 527 | 0 | ConsumePrimitiveValuesFromKey( | 528 | 0 | decoder->mutable_input(), allow_special, callback.range_group(), | 529 | 0 | max_components_to_decode), | 530 | 0 | "Error when decoding range components of a document key"); | 531 | 0 | } | 532 | 10 | return Status::OK(); | 533 | 10 | } | 534 | 0 | FATAL_INVALID_ENUM_VALUE(DocKeyPart, part_to_decode); | 535 | 0 | } |
doc_key.cc:yb::Status yb::docdb::DocKey::DoDecode<yb::docdb::(anonymous namespace)::EncodedSizesCallback>(yb::docdb::DocKeyDecoder*, yb::docdb::DocKeyPart, yb::StronglyTypedBool<yb::docdb::AllowSpecial_Tag>, yb::docdb::(anonymous namespace)::EncodedSizesCallback const&) Line | Count | Source | 489 | 93.7M | const Callback& callback) { | 490 | 93.7M | Uuid cotable_id; | 491 | 93.7M | ColocationId colocation_id; | 492 | 93.7M | if (VERIFY_RESULT(decoder->DecodeCotableId(&cotable_id))) { | 493 | 43.9M | callback.SetCoTableId(cotable_id); | 494 | 49.7M | } else if (VERIFY_RESULT(decoder->DecodeColocationId(&colocation_id))) { | 495 | 472 | callback.SetColocationId(colocation_id); | 496 | 472 | } | 497 | | | 498 | 0 | switch (part_to_decode) { | 499 | 0 | case DocKeyPart::kUpToId: | 500 | 0 | return Status::OK(); | 501 | 0 | case DocKeyPart::kUpToHashCode: FALLTHROUGH_INTENDED; | 502 | 0 | case DocKeyPart::kUpToHash: FALLTHROUGH_INTENDED; | 503 | 0 | case DocKeyPart::kUpToHashOrFirstRange: FALLTHROUGH_INTENDED; | 504 | 93.8M | case DocKeyPart::kWholeDocKey: | 505 | 93.8M | uint16_t hash_code; | 506 | 93.8M | const auto hash_present = VERIFY_RESULT(decoder->DecodeHashCode(&hash_code, allow_special)); | 507 | 93.8M | if (hash_present) { | 508 | 45.8M | callback.SetHash(/* present */ true, hash_code); | 509 | 45.8M | if (part_to_decode == DocKeyPart::kUpToHashCode) { | 510 | 0 | return Status::OK(); | 511 | 0 | } | 512 | 45.8M | RETURN_NOT_OK_PREPEND( | 513 | 45.8M | ConsumePrimitiveValuesFromKey( | 514 | 45.8M | decoder->mutable_input(), allow_special, callback.hashed_group()), | 515 | 45.8M | "Error when decoding hashed components of a document key"); | 516 | 47.9M | } else { | 517 | 47.9M | callback.SetHash(/* present */ false); | 518 | 47.9M | } | 519 | 93.8M | if (decoder->left_input().empty()) { | 520 | 0 | return Status::OK(); | 521 | 0 | } | 522 | | // The rest are range components. 
| 523 | 93.8M | const auto max_components_to_decode = | 524 | 93.8M | MaxRangeComponentsToDecode(part_to_decode, hash_present); | 525 | 93.8M | if (max_components_to_decode > 0) { | 526 | 93.7M | RETURN_NOT_OK_PREPEND( | 527 | 93.7M | ConsumePrimitiveValuesFromKey( | 528 | 93.7M | decoder->mutable_input(), allow_special, callback.range_group(), | 529 | 93.7M | max_components_to_decode), | 530 | 93.7M | "Error when decoding range components of a document key"); | 531 | 93.7M | } | 532 | 93.8M | return Status::OK(); | 533 | 93.7M | } | 534 | 0 | FATAL_INVALID_ENUM_VALUE(DocKeyPart, part_to_decode); | 535 | 0 | } |
yb::Status yb::docdb::DocKey::DoDecode<yb::docdb::DocKey::DecodeFromCallback>(yb::docdb::DocKeyDecoder*, yb::docdb::DocKeyPart, yb::StronglyTypedBool<yb::docdb::AllowSpecial_Tag>, yb::docdb::DocKey::DecodeFromCallback const&) Line | Count | Source | 489 | 438M | const Callback& callback) { | 490 | 438M | Uuid cotable_id; | 491 | 438M | ColocationId colocation_id; | 492 | 438M | if (VERIFY_RESULT(decoder->DecodeCotableId(&cotable_id))) { | 493 | 263M | callback.SetCoTableId(cotable_id); | 494 | 263M | } else if (175M VERIFY_RESULT175M (decoder->DecodeColocationId(&colocation_id))) { | 495 | 2.59k | callback.SetColocationId(colocation_id); | 496 | 2.59k | } | 497 | | | 498 | 0 | switch (part_to_decode) { | 499 | 173 | case DocKeyPart::kUpToId: | 500 | 173 | return Status::OK(); | 501 | 21.5M | case DocKeyPart::kUpToHashCode: FALLTHROUGH_INTENDED; | 502 | 21.5M | case DocKeyPart::kUpToHash: FALLTHROUGH_INTENDED; | 503 | 21.5M | case DocKeyPart::kUpToHashOrFirstRange: FALLTHROUGH_INTENDED; | 504 | 438M | case DocKeyPart::kWholeDocKey: | 505 | 438M | uint16_t hash_code; | 506 | 438M | const auto hash_present = VERIFY_RESULT438M (decoder->DecodeHashCode(&hash_code, allow_special));438M | 507 | 438M | if (hash_present) { | 508 | 160M | callback.SetHash(/* present */ true, hash_code); | 509 | 160M | if (part_to_decode == DocKeyPart::kUpToHashCode) { | 510 | 16.8M | return Status::OK(); | 511 | 16.8M | } | 512 | 144M | RETURN_NOT_OK_PREPEND( | 513 | 144M | ConsumePrimitiveValuesFromKey( | 514 | 144M | decoder->mutable_input(), allow_special, callback.hashed_group()), | 515 | 144M | "Error when decoding hashed components of a document key"); | 516 | 277M | } else { | 517 | 277M | callback.SetHash(/* present */ false); | 518 | 277M | } | 519 | 421M | if (decoder->left_input().empty()) { | 520 | 0 | return Status::OK(); | 521 | 0 | } | 522 | | // The rest are range components. 
| 523 | 421M | const auto max_components_to_decode = | 524 | 421M | MaxRangeComponentsToDecode(part_to_decode, hash_present); | 525 | 421M | if (max_components_to_decode > 0) { | 526 | 417M | RETURN_NOT_OK_PREPEND( | 527 | 417M | ConsumePrimitiveValuesFromKey( | 528 | 417M | decoder->mutable_input(), allow_special, callback.range_group(), | 529 | 417M | max_components_to_decode), | 530 | 417M | "Error when decoding range components of a document key"); | 531 | 417M | } | 532 | 421M | return Status::OK(); | 533 | 438M | } | 534 | 0 | FATAL_INVALID_ENUM_VALUE(DocKeyPart, part_to_decode); | 535 | 0 | } |
|
536 | | |
537 | 5.10M | yb::Status DocKey::FullyDecodeFrom(const rocksdb::Slice& slice) { |
538 | 5.10M | rocksdb::Slice mutable_slice = slice; |
539 | 5.10M | Status status = DecodeFrom(&mutable_slice); |
540 | 5.10M | if (!mutable_slice.empty()) { |
541 | 0 | return STATUS_SUBSTITUTE(InvalidArgument, |
542 | 0 | "Expected all bytes of the slice to be decoded into DocKey, found $0 extra bytes", |
543 | 0 | mutable_slice.size()); |
544 | 0 | } |
545 | 5.10M | return status; |
546 | 5.10M | } |
547 | | |
548 | | namespace { |
549 | | |
550 | | // We need a special implementation of converting a vector to string because we need to pass the |
551 | | // auto_decode_keys flag to PrimitiveValue::ToString. |
552 | | void AppendVectorToString( |
553 | | std::string* dest, |
554 | | const std::vector<PrimitiveValue>& vec, |
555 | 1.56M | AutoDecodeKeys auto_decode_keys) { |
556 | 1.56M | bool need_comma = false; |
557 | 2.51M | for (const auto& pv : vec) { |
558 | 2.51M | if (need_comma) { |
559 | 1.43M | dest->append(", "); |
560 | 1.43M | } |
561 | 2.51M | need_comma = true; |
562 | 2.51M | dest->append(pv.ToString(auto_decode_keys)); |
563 | 2.51M | } |
564 | 1.56M | } |
565 | | |
566 | | void AppendVectorToStringWithBrackets( |
567 | | std::string* dest, |
568 | | const std::vector<PrimitiveValue>& vec, |
569 | 1.33M | AutoDecodeKeys auto_decode_keys) { |
570 | 1.33M | dest->push_back('['); |
571 | 1.33M | AppendVectorToString(dest, vec, auto_decode_keys); |
572 | 1.33M | dest->push_back(']'); |
573 | 1.33M | } |
574 | | |
575 | | } // namespace |
576 | | |
// Renders this DocKey in a human-readable form, e.g.
// "DocKey(0x1234, [hashed components], [range components])".
// The CoTableId or ColocationId prefix is included only when set, and the hash code is
// included only when hash_present_. auto_decode_keys is forwarded to PrimitiveValue::ToString.
string DocKey::ToString(AutoDecodeKeys auto_decode_keys) const {
  string result = "DocKey(";
  if (!cotable_id_.IsNil()) {
    result += "CoTableId=";
    result += cotable_id_.ToString();
    result += ", ";
  } else if (colocation_id_ != kColocationIdNotSet) {
    result += "ColocationId=";
    result += std::to_string(colocation_id_);
    result += ", ";
  }

  if (hash_present_) {
    result += StringPrintf("0x%04x", hash_);
    result += ", ";
  }

  AppendVectorToStringWithBrackets(&result, hashed_group_, auto_decode_keys);
  result += ", ";
  AppendVectorToStringWithBrackets(&result, range_group_, auto_decode_keys);
  result.push_back(')');
  return result;
}
600 | | |
601 | 135k | bool DocKey::operator ==(const DocKey& other) const { |
602 | 135k | return cotable_id_ == other.cotable_id_ && |
603 | 135k | colocation_id_ == other.colocation_id_ && |
604 | 135k | HashedComponentsEqual(other) && |
605 | 135k | range_group_ == other.range_group_74.6k ; |
606 | 135k | } |
607 | | |
// Compares the hashed part of two DocKeys: the hash-presence flag, and, when a hash is
// present, the hash code and the hashed column values.
bool DocKey::HashedComponentsEqual(const DocKey& other) const {
  return hash_present_ == other.hash_present_ &&
      // Only compare hashes and hashed groups if the hash presence flag is set.
      (!hash_present_ || (hash_ == other.hash_ && hashed_group_ == other.hashed_group_));
}
613 | | |
// Appends |val| as the last range component of this key.
void DocKey::AddRangeComponent(const PrimitiveValue& val) {
  range_group_.push_back(val);
}
617 | | |
618 | 12 | void DocKey::SetRangeComponent(const PrimitiveValue& val, int idx) { |
619 | 12 | DCHECK_LT(idx, range_group_.size()); |
620 | 12 | range_group_[idx] = val; |
621 | 12 | } |
622 | | |
// Three-way comparison of two DocKeys, consistent with the ordering of their encoded forms:
// cotable id, then colocation id, then hash presence, hash code (when present),
// hashed components, and finally range components.
int DocKey::CompareTo(const DocKey& other) const {
  int result = CompareUsingLessThan(cotable_id_, other.cotable_id_);
  if (result != 0) return result;

  result = CompareUsingLessThan(colocation_id_, other.colocation_id_);
  if (result != 0) return result;

  result = CompareUsingLessThan(hash_present_, other.hash_present_);
  if (result != 0) return result;

  if (hash_present_) {
    result = CompareUsingLessThan(hash_, other.hash_);
    if (result != 0) return result;
  }

  result = CompareVectors(hashed_group_, other.hashed_group_);
  if (result != 0) return result;

  return CompareVectors(range_group_, other.range_group_);
}
643 | | |
644 | 613k | DocKey DocKey::FromRedisKey(uint16_t hash, const string &key) { |
645 | 613k | DocKey new_doc_key; |
646 | 613k | new_doc_key.hash_present_ = true; |
647 | 613k | new_doc_key.hash_ = hash; |
648 | 613k | new_doc_key.hashed_group_.emplace_back(key); |
649 | 613k | return new_doc_key; |
650 | 613k | } |
651 | | |
// Directly builds the encoded form of FromRedisKey(hash, key) without constructing a DocKey:
// kUInt16Hash + hash code, kString + key, then two group-end markers (end of the hashed
// group followed by the end of the empty range group). The DCHECK verifies this fast path
// stays byte-identical to the general encoding path.
KeyBytes DocKey::EncodedFromRedisKey(uint16_t hash, const std::string &key) {
  KeyBytes result;
  result.AppendValueType(ValueType::kUInt16Hash);
  result.AppendUInt16(hash);
  result.AppendValueType(ValueType::kString);
  result.AppendString(key);
  result.AppendValueType(ValueType::kGroupEnd);
  result.AppendValueType(ValueType::kGroupEnd);
  DCHECK_EQ(result, FromRedisKey(hash, key).Encode());
  return result;
}
663 | | |
664 | 961 | std::string DocKey::DebugSliceToString(Slice slice) { |
665 | 961 | DocKey key; |
666 | 961 | auto decoded_size = key.DecodeFrom(slice, DocKeyPart::kWholeDocKey, AllowSpecial::kTrue); |
667 | 961 | if (!decoded_size.ok()) { |
668 | 16 | return decoded_size.status().ToString() + ": " + slice.ToDebugHexString(); |
669 | 16 | } |
670 | 945 | slice.remove_prefix(*decoded_size); |
671 | 945 | auto result = key.ToString(); |
672 | 945 | if (!slice.empty()) { |
673 | 0 | result += " + "; |
674 | 0 | result += slice.ToDebugHexString(); |
675 | 0 | } |
676 | 945 | return result; |
677 | 961 | } |
678 | | |
679 | 0 | bool DocKey::BelongsTo(const Schema& schema) const { |
680 | 0 | if (!cotable_id_.IsNil()) { |
681 | 0 | return cotable_id_ == schema.cotable_id(); |
682 | 0 | } else if (colocation_id_ != kColocationIdNotSet) { |
683 | 0 | return colocation_id_ == schema.colocation_id(); |
684 | 0 | } |
685 | 0 | return schema.cotable_id().IsNil() && schema.colocation_id() == kColocationIdNotSet; |
686 | 0 | } |
687 | | |
688 | | // ------------------------------------------------------------------------------------------------ |
689 | | // SubDocKey |
690 | | // ------------------------------------------------------------------------------------------------ |
691 | | |
692 | 3.59M | KeyBytes SubDocKey::DoEncode(bool include_hybrid_time) const { |
693 | 3.59M | KeyBytes key_bytes = doc_key_.Encode(); |
694 | 3.59M | for (const auto& subkey : subkeys_) { |
695 | 2.70M | subkey.AppendToKey(&key_bytes); |
696 | 2.70M | } |
697 | 3.59M | if (has_hybrid_time() && include_hybrid_time109k ) { |
698 | 109k | AppendDocHybridTime(doc_ht_, &key_bytes); |
699 | 109k | } |
700 | 3.59M | return key_bytes; |
701 | 3.59M | } |
702 | | |
namespace {

// Callback used by SubDocKey::PartiallyDecode. Instead of materializing PrimitiveValues,
// it records the slices of the doc key components and the hybrid time into |out|.
class DecodeSubDocKeyCallback {
 public:
  explicit DecodeSubDocKeyCallback(boost::container::small_vector_base<Slice>* out) : out_(out) {}

  CHECKED_STATUS DecodeDocKey(Slice* slice) const {
    return DocKey::PartiallyDecode(slice, out_);
  }

  // We don't need subkeys in partial decoding.
  PrimitiveValue* AddSubkey() const {
    return nullptr;
  }

  DocHybridTime& doc_hybrid_time() const {
    return doc_hybrid_time_;
  }

  // Records the raw encoded hybrid-time slice as the final entry of |out|.
  void DocHybridTimeSlice(Slice slice) const {
    out_->push_back(slice);
  }
 private:
  boost::container::small_vector_base<Slice>* out_;
  // Decoded into but otherwise unused; DoDecode needs somewhere to store the hybrid time.
  mutable DocHybridTime doc_hybrid_time_;
};

}  // namespace
731 | | |
732 | 93.0M | Status SubDocKey::PartiallyDecode(Slice* slice, boost::container::small_vector_base<Slice>* out) { |
733 | 93.0M | CHECK_NOTNULL(out); |
734 | 93.0M | return DoDecode(slice, HybridTimeRequired::kTrue, AllowSpecial::kFalse, |
735 | 93.0M | DecodeSubDocKeyCallback(out)); |
736 | 93.0M | } |
737 | | |
// Callback used by SubDocKey::DecodeFrom that materializes the decoded components directly
// into a SubDocKey instance.
class SubDocKey::DecodeCallback {
 public:
  explicit DecodeCallback(SubDocKey* key) : key_(key) {}

  CHECKED_STATUS DecodeDocKey(Slice* slice) const {
    return key_->doc_key_.DecodeFrom(slice);
  }

  // Creates storage for the next subkey and returns a pointer for the decoder to fill in.
  PrimitiveValue* AddSubkey() const {
    key_->subkeys_.emplace_back();
    return &key_->subkeys_.back();
  }

  DocHybridTime& doc_hybrid_time() const {
    return key_->doc_ht_;
  }

  // The raw encoded hybrid-time slice is not needed when decoding into a SubDocKey.
  void DocHybridTimeSlice(Slice slice) const {
  }
 private:
  SubDocKey* key_;
};
760 | | |
761 | | Status SubDocKey::DecodeFrom( |
762 | 404M | Slice* slice, HybridTimeRequired require_hybrid_time, AllowSpecial allow_special) { |
763 | 404M | Clear(); |
764 | 404M | return DoDecode(slice, require_hybrid_time, allow_special, DecodeCallback(this)); |
765 | 404M | } |
766 | | |
// Decodes the whole slice as a SubDocKey; the trailing hybrid time may be absent.
Status SubDocKey::FullyDecodeFromKeyWithOptionalHybridTime(const rocksdb::Slice& slice) {
  return FullyDecodeFrom(slice, HybridTimeRequired::kFalse);
}
770 | | |
// Consumes one subkey from |slice| if present, discarding its decoded value.
// Returns false (without consuming anything) at end of input or at the hybrid time marker.
Result<bool> SubDocKey::DecodeSubkey(Slice* slice) {
  return DecodeSubkey(slice, DummyCallback());
}
774 | | |
// Attempts to decode one subkey from the front of |slice|, storing it via
// callback.AddSubkey() (which may return nullptr to discard the value).
// Returns true and consumes the subkey when the slice starts with one; returns false
// without consuming anything when the slice is empty or positioned at the hybrid time
// marker (kHybridTime), which terminates the subkey sequence.
template<class Callback>
Result<bool> SubDocKey::DecodeSubkey(Slice* slice, const Callback& callback) {
  if (!slice->empty() && *slice->data() != ValueTypeAsChar::kHybridTime) {
    RETURN_NOT_OK(PrimitiveValue::DecodeKey(slice, callback.AddSubkey()));
    return true;
  }
  return false;
}
783 | | |
// Shared implementation of SubDocKey decoding, parameterized by a callback that decides
// where decoded components go (into a SubDocKey, or into a vector of raw slices).
// Steps: decode the doc key, then consume subkeys until the hybrid time marker (or a
// special value type when allow_special), then decode the hybrid time if present.
// require_hybrid_time makes a missing trailing hybrid time a Corruption error;
// it cannot be combined with allow_special.
template<class Callback>
Status SubDocKey::DoDecode(rocksdb::Slice* slice,
                           const HybridTimeRequired require_hybrid_time,
                           AllowSpecial allow_special,
                           const Callback& callback) {
  if (allow_special && require_hybrid_time) {
    return STATUS(NotSupported,
                  "Not supported to have both require_hybrid_time and allow_special");
  }
  // Kept for error messages: |slice| is consumed as we decode.
  const rocksdb::Slice original_bytes(*slice);

  RETURN_NOT_OK(callback.DecodeDocKey(slice));
  for (;;) {
    // Special value types (only permitted with allow_special) terminate decoding early,
    // leaving the hybrid time invalid.
    if (allow_special && !slice->empty() &&
        IsSpecialValueType(static_cast<ValueType>(slice->cdata()[0]))) {
      callback.doc_hybrid_time() = DocHybridTime::kInvalid;
      return Status::OK();
    }
    auto decode_result = DecodeSubkey(slice, callback);
    RETURN_NOT_OK_PREPEND(
        decode_result,
        Substitute("While decoding SubDocKey $0", ToShortDebugStr(original_bytes)));
    if (!decode_result.get()) {
      break;
    }
  }
  if (slice->empty()) {
    if (!require_hybrid_time) {
      callback.doc_hybrid_time() = DocHybridTime::kInvalid;
      return Status::OK();
    }
    return STATUS_SUBSTITUTE(
        Corruption,
        "Found too few bytes in the end of a SubDocKey for a type-prefixed hybrid_time: $0",
        ToShortDebugStr(*slice));
  }

  // The reason the following is not handled as a Status is that the logic above (loop + emptiness
  // check) should guarantee this is the only possible case left.
  DCHECK_EQ(ValueType::kHybridTime, DecodeValueType(*slice));
  slice->consume_byte();

  // Hand the callback both the decoded hybrid time and the raw bytes it was decoded from.
  auto begin = slice->data();
  callback.doc_hybrid_time() = VERIFY_RESULT(DocHybridTime::DecodeFrom(slice));
  callback.DocHybridTimeSlice(Slice(begin, slice->data()));

  return Status::OK();
}
832 | | |
833 | | Status SubDocKey::FullyDecodeFrom(const rocksdb::Slice& slice, |
834 | 404M | HybridTimeRequired require_hybrid_time) { |
835 | 404M | rocksdb::Slice mutable_slice = slice; |
836 | 404M | RETURN_NOT_OK(DecodeFrom(&mutable_slice, require_hybrid_time)); |
837 | 404M | if (!mutable_slice.empty()) { |
838 | 0 | return STATUS_SUBSTITUTE(InvalidArgument, |
839 | 0 | "Expected all bytes of the slice to be decoded into SubDocKey, found $0 extra bytes: $1", |
840 | 0 | mutable_slice.size(), mutable_slice.ToDebugHexString()); |
841 | 0 | } |
842 | 404M | return Status::OK(); |
843 | 404M | } |
844 | | |
// Computes cumulative encoded-byte lengths of the prefixes of |slice| and appends them to
// |out|: the hashed part (if present), then each range component (the last one also covers
// the range-group-end byte), then each subkey. All offsets are relative to the start of
// |slice|.
Status SubDocKey::DecodePrefixLengths(
    Slice slice, boost::container::small_vector_base<size_t>* out) {
  auto begin = slice.data();
  auto hashed_part_size = VERIFY_RESULT(DocKey::EncodedSize(slice, DocKeyPart::kUpToHash));
  if (hashed_part_size != 0) {
    slice.remove_prefix(hashed_part_size);
    out->push_back(hashed_part_size);
  }
  // Each range component extends the last recorded prefix.
  while (VERIFY_RESULT(ConsumePrimitiveValueFromKey(&slice))) {
    out->push_back(slice.data() - begin);
  }
  if (!out->empty()) {
    if (begin[out->back()] != ValueTypeAsChar::kGroupEnd) {
      return STATUS_FORMAT(Corruption, "Range keys group end expected at $0 in $1",
                           out->back(), Slice(begin, slice.end()).ToDebugHexString());
    }
    ++out->back();  // Add range key group end to last prefix
  }
  while (VERIFY_RESULT(SubDocKey::DecodeSubkey(&slice))) {
    out->push_back(slice.data() - begin);
  }

  return Status::OK();
}
869 | | |
// Computes cumulative end offsets within |slice|: first the ID prefix (cotable/colocation
// id), then the whole doc key, then each subkey. Previously computed offsets already in
// |out| are reused, so this can be called incrementally as a key is processed.
Status SubDocKey::DecodeDocKeyAndSubKeyEnds(
    Slice slice, boost::container::small_vector_base<size_t>* out) {
  auto begin = slice.data();
  if (out->empty()) {
    auto id_size = VERIFY_RESULT(DocKey::EncodedSize(slice, DocKeyPart::kUpToId));
    out->push_back(id_size);
  }
  if (out->size() == 1) {
    auto id_size = out->front();
    SCHECK_GE(slice.size(), id_size + 1, Corruption,
              Format("Cannot have exclusively ID in key $0", slice.ToDebugHexString()));
    // Identify table tombstone.
    if (slice[0] == ValueTypeAsChar::kColocationId &&
        slice[id_size] == ValueTypeAsChar::kGroupEnd) {
      SCHECK_GE(slice.size(), id_size + 2, Corruption,
                Format("Space for kHybridTime expected in key $0", slice.ToDebugHexString()));
      SCHECK_EQ(slice[id_size + 1], ValueTypeAsChar::kHybridTime, Corruption,
                Format("Hybrid time expected in key $0", slice.ToDebugHexString()));
      // Consume kGroupEnd without pushing to out because the empty key of a table tombstone
      // shouldn't count as an end.
      slice.remove_prefix(id_size + 1);
    } else {
      auto doc_key_size = VERIFY_RESULT(DocKey::EncodedSize(slice, DocKeyPart::kWholeDocKey));
      slice.remove_prefix(doc_key_size);
      out->push_back(doc_key_size);
    }
  } else {
    // Doc key end already known from a previous call; skip straight past it.
    slice.remove_prefix(out->back());
  }
  while (VERIFY_RESULT(SubDocKey::DecodeSubkey(&slice))) {
    out->push_back(slice.data() - begin);
  }

  return Status::OK();
}
905 | | |
906 | 29 | std::string SubDocKey::DebugSliceToString(Slice slice) { |
907 | 29 | auto r = DebugSliceToStringAsResult(slice); |
908 | 29 | if (r.ok()) { |
909 | 29 | return r.get(); |
910 | 29 | } |
911 | 0 | return r.status().ToString(); |
912 | 29 | } |
913 | | |
914 | 29 | Result<std::string> SubDocKey::DebugSliceToStringAsResult(Slice slice) { |
915 | 29 | SubDocKey key; |
916 | 29 | auto status = key.DecodeFrom(&slice, HybridTimeRequired::kFalse, AllowSpecial::kTrue); |
917 | 29 | if (status.ok()) { |
918 | 29 | if (slice.empty()) { |
919 | 29 | return key.ToString(); |
920 | 29 | } |
921 | 0 | return key.ToString() + "+" + slice.ToDebugHexString(); |
922 | 29 | } |
923 | 0 | return status; |
924 | 29 | } |
925 | | |
926 | 238k | string SubDocKey::ToString(AutoDecodeKeys auto_decode_keys) const { |
927 | 238k | std::string result("SubDocKey("); |
928 | 238k | result.append(doc_key_.ToString(auto_decode_keys)); |
929 | 238k | result.append(", ["); |
930 | | |
931 | 238k | AppendVectorToString(&result, subkeys_, auto_decode_keys); |
932 | | |
933 | 238k | if (has_hybrid_time()) { |
934 | 225k | if (!subkeys_.empty()) { |
935 | 220k | result.append("; "); |
936 | 220k | } |
937 | 225k | result.append(doc_ht_.ToString()); |
938 | 225k | } |
939 | 238k | result.append("])"); |
940 | 238k | return result; |
941 | 238k | } |
942 | | |
// Initializes this SubDocKey from a DocPath: decodes the path's encoded doc key and copies
// its subkeys. The hybrid time is left untouched.
Status SubDocKey::FromDocPath(const DocPath& doc_path) {
  RETURN_NOT_OK(doc_key_.FullyDecodeFrom(doc_path.encoded_doc_key().AsSlice()));
  subkeys_ = doc_path.subkeys();
  return Status::OK();
}
948 | | |
// Resets this SubDocKey to its default state: empty doc key, no subkeys, invalid hybrid time.
void SubDocKey::Clear() {
  doc_key_.Clear();
  subkeys_.clear();
  doc_ht_ = DocHybridTime::kInvalid;
}
954 | | |
// Returns true if |prefix| is a component-wise prefix of this key: same doc key, a leading
// subsequence of subkeys, and (because the hybrid time is encoded after the subkeys) either
// no hybrid time in the prefix or a complete match of the whole key.
bool SubDocKey::StartsWith(const SubDocKey& prefix) const {
  return doc_key_ == prefix.doc_key_ &&
         // Subkeys precede the hybrid_time field in the encoded representation, so the hybrid_time
         // either has to be undefined in the prefix, or the entire key must match, including
         // subkeys and the hybrid_time (in this case the prefix is the same as this key).
         (!prefix.has_hybrid_time() ||
          (doc_ht_ == prefix.doc_ht_ && prefix.num_subkeys() == num_subkeys())) &&
         prefix.num_subkeys() <= num_subkeys() &&
         // std::mismatch finds the first difference between two sequences. Prior to C++14, the
         // behavior is undefined if the second range is shorter than the first range, so we make
         // sure the potentially shorter range is first.
         std::mismatch(
             prefix.subkeys_.begin(), prefix.subkeys_.end(), subkeys_.begin()
         ).first == prefix.subkeys_.end();
}
970 | | |
971 | 65.2k | bool SubDocKey::operator==(const SubDocKey& other) const { |
972 | 65.2k | if (doc_key_ != other.doc_key_ || |
973 | 65.2k | subkeys_ != other.subkeys_5.65k ) |
974 | 59.9k | return false; |
975 | | |
976 | 5.35k | const bool ht_is_valid = doc_ht_.is_valid(); |
977 | 5.35k | const bool other_ht_is_valid = other.doc_ht_.is_valid(); |
978 | 5.35k | if (ht_is_valid != other_ht_is_valid) |
979 | 0 | return false; |
980 | 5.35k | if (ht_is_valid) { |
981 | 2.08k | return doc_ht_ == other.doc_ht_; |
982 | 3.27k | } else { |
983 | | // Both keys don't have a hybrid time. |
984 | 3.27k | return true; |
985 | 3.27k | } |
986 | 5.35k | } |
987 | | |
988 | 47.1k | int SubDocKey::CompareTo(const SubDocKey& other) const { |
989 | 47.1k | int result = CompareToIgnoreHt(other); |
990 | 47.1k | if (result != 0) return result44.1k ; |
991 | | |
992 | 3.07k | const bool ht_is_valid = doc_ht_.is_valid(); |
993 | 3.07k | const bool other_ht_is_valid = other.doc_ht_.is_valid(); |
994 | 3.07k | if (ht_is_valid) { |
995 | 54 | if (other_ht_is_valid) { |
996 | | // HybridTimes are sorted in reverse order. |
997 | 54 | return -doc_ht_.CompareTo(other.doc_ht_); |
998 | 54 | } else { |
999 | | // This key has a hybrid time and the other one is identical but lacks the hybrid time, so |
1000 | | // this one is greater. |
1001 | 0 | return 1; |
1002 | 0 | } |
1003 | 3.01k | } else { |
1004 | 3.01k | if (other_ht_is_valid) { |
1005 | | // This key is a "prefix" of the other key, which has a hybrid time, so this one is less. |
1006 | 0 | return -1; |
1007 | 3.01k | } else { |
1008 | | // Neither key has a hybrid time. |
1009 | 3.01k | return 0; |
1010 | 3.01k | } |
1011 | 3.01k | } |
1012 | | |
1013 | 3.07k | } |
1014 | | |
1015 | 47.1k | int SubDocKey::CompareToIgnoreHt(const SubDocKey& other) const { |
1016 | 47.1k | int result = doc_key_.CompareTo(other.doc_key_); |
1017 | 47.1k | if (result != 0) return result42.6k ; |
1018 | | |
1019 | 4.53k | result = CompareVectors(subkeys_, other.subkeys_); |
1020 | 4.53k | return result; |
1021 | 47.1k | } |
1022 | | |
// Produces the most informative human-readable rendering of |key_bytes| it can:
// tries to decode it as a SubDocKey (special value types allowed, hybrid time optional),
// prints a plain DocKey when there are no subkeys and no hybrid time, appends any
// undecoded remainder, and falls back to a raw key dump if decoding fails entirely.
string BestEffortDocDBKeyToStr(const KeyBytes &key_bytes) {
  rocksdb::Slice mutable_slice(key_bytes.AsSlice());
  SubDocKey subdoc_key;
  Status decode_status = subdoc_key.DecodeFrom(
      &mutable_slice, HybridTimeRequired::kFalse, AllowSpecial::kTrue);
  if (decode_status.ok()) {
    ostringstream ss;
    if (!subdoc_key.has_hybrid_time() && subdoc_key.num_subkeys() == 0) {
      // This is really just a DocKey.
      ss << subdoc_key.doc_key().ToString();
    } else {
      ss << subdoc_key.ToString();
    }
    if (mutable_slice.size() > 0) {
      ss << "+" << mutable_slice.ToDebugString();
      // Can append the above status of why we could not decode a SubDocKey, if needed.
    }
    return ss.str();
  }

  // We could not decode a SubDocKey at all, even without a hybrid_time.
  return key_bytes.ToString();
}
1046 | | |
// Slice overload: copies the bytes into a KeyBytes and delegates to the overload above.
std::string BestEffortDocDBKeyToStr(const rocksdb::Slice& slice) {
  return BestEffortDocDBKeyToStr(KeyBytes(slice));
}
1050 | | |
1051 | 287k | KeyBytes SubDocKey::AdvanceOutOfSubDoc() const { |
1052 | 287k | KeyBytes subdoc_key_no_ts = EncodeWithoutHt(); |
1053 | 287k | subdoc_key_no_ts.AppendValueType(ValueType::kMaxByte); |
1054 | 287k | return subdoc_key_no_ts; |
1055 | 287k | } |
1056 | | |
// Returns key bytes that sort after every key sharing this doc key's hash/range prefix.
KeyBytes SubDocKey::AdvanceOutOfDocKeyPrefix() const {
  // To construct key bytes that will seek past this DocKey and DocKeys that have the same hash
  // components but add more range components to it, we will strip the group-end of the range
  // components and append 0xff, which will be lexicographically higher than any key bytes
  // with the same hash and range component prefix. For example,
  //
  // DocKey(0x1234, ["aa", "bb"], ["cc", "dd"])
  // Encoded: H\0x12\0x34$aa\x00\x00$bb\x00\x00!$cc\x00\x00$dd\x00\x00!
  // Result:  H\0x12\0x34$aa\x00\x00$bb\x00\x00!$cc\x00\x00$dd\x00\x00\xff
  // This key will also skip all DocKeys that have additional range components, e.g.
  // DocKey(0x1234, ["aa", "bb"], ["cc", "dd", "ee"])
  // (encoded as H\0x12\0x34$aa\x00\x00$bb\x00\x00!$cc\x00\x00$dd\x00\x00$ee\x00\00!). That should
  // make no difference to DocRowwiseIterator in a valid database, because all keys actually stored
  // in DocDB will have exactly the same number of range components.
  //
  // Now, suppose there are no range components in the key passed to us (note: that does not
  // necessarily mean there are no range components in the schema, just the doc key being passed to
  // us is a custom-constructed DocKey with no range components because the caller wants a key
  // that will skip pass all doc keys with the same hash components prefix). Example:
  //
  // DocKey(0x1234, ["aa", "bb"], [])
  // Encoded: H\0x12\0x34$aa\x00\x00$bb\x00\x00!!
  // Result:  H\0x12\0x34$aa\x00\x00$bb\x00\x00!\xff
  KeyBytes doc_key_encoded = doc_key_.Encode();
  doc_key_encoded.RemoveValueTypeSuffix(ValueType::kGroupEnd);
  doc_key_encoded.AppendValueType(ValueType::kMaxByte);
  return doc_key_encoded;
}
1085 | | |
1086 | 1.08k | void SubDocKey::AppendSubKey(PrimitiveValue subkey) { |
1087 | 1.08k | subkeys_.emplace_back(std::move(subkey)); |
1088 | 1.08k | } |
1089 | | |
// Removes the most recently appended subkey. It is a programming error to
// call this when there are no subkeys (debug-checked only).
void SubDocKey::RemoveLastSubKey() {
  DCHECK(!subkeys_.empty());
  subkeys_.pop_back();
}
1094 | | |
1095 | 9 | void SubDocKey::KeepPrefix(size_t num_sub_keys_to_keep) { |
1096 | 9 | if (subkeys_.size() > num_sub_keys_to_keep) { |
1097 | 0 | subkeys_.resize(num_sub_keys_to_keep); |
1098 | 0 | } |
1099 | 9 | } |
1100 | | |
1101 | | // ------------------------------------------------------------------------------------------------ |
1102 | | // DocDbAwareFilterPolicy |
1103 | | // ------------------------------------------------------------------------------------------------ |
1104 | | |
1105 | | namespace { |
1106 | | |
1107 | | template<DocKeyPart doc_key_part> |
1108 | | class DocKeyComponentsExtractor : public rocksdb::FilterPolicy::KeyTransformer { |
1109 | | public: |
1110 | | DocKeyComponentsExtractor(const DocKeyComponentsExtractor&) = delete; |
1111 | | DocKeyComponentsExtractor& operator=(const DocKeyComponentsExtractor&) = delete; |
1112 | | |
1113 | 15.1k | static DocKeyComponentsExtractor& GetInstance() { |
1114 | 15.1k | static DocKeyComponentsExtractor<doc_key_part> instance; |
1115 | 15.1k | return instance; |
1116 | 15.1k | } Unexecuted instantiation: doc_key.cc:yb::docdb::(anonymous namespace)::DocKeyComponentsExtractor<(yb::docdb::DocKeyPart)1>::GetInstance() doc_key.cc:yb::docdb::(anonymous namespace)::DocKeyComponentsExtractor<(yb::docdb::DocKeyPart)3>::GetInstance() Line | Count | Source | 1113 | 15.1k | static DocKeyComponentsExtractor& GetInstance() { | 1114 | 15.1k | static DocKeyComponentsExtractor<doc_key_part> instance; | 1115 | 15.1k | return instance; | 1116 | 15.1k | } |
|
1117 | | |
1118 | | // For encoded DocKey extracts specified part, for non-DocKey returns empty key, so they will |
1119 | | // always match the filter (this is correct, but might be optimized for performance if/when |
1120 | | // needed). |
1121 | | // As of 2020-05-12 intents DB could contain keys in non-DocKey format. |
1122 | 108M | Slice Transform(Slice key) const override { |
1123 | 108M | auto size_result = DocKey::EncodedSize(key, doc_key_part); |
1124 | 108M | return size_result.ok() ? Slice(key.data(), *size_result)108M : Slice()695k ; |
1125 | 108M | } Unexecuted instantiation: doc_key.cc:yb::docdb::(anonymous namespace)::DocKeyComponentsExtractor<(yb::docdb::DocKeyPart)1>::Transform(yb::Slice) const doc_key.cc:yb::docdb::(anonymous namespace)::DocKeyComponentsExtractor<(yb::docdb::DocKeyPart)3>::Transform(yb::Slice) const Line | Count | Source | 1122 | 108M | Slice Transform(Slice key) const override { | 1123 | 108M | auto size_result = DocKey::EncodedSize(key, doc_key_part); | 1124 | 108M | return size_result.ok() ? Slice(key.data(), *size_result)108M : Slice()695k ; | 1125 | 108M | } |
|
1126 | | |
1127 | | private: |
1128 | | DocKeyComponentsExtractor() = default; |
1129 | | }; |
1130 | | |
1131 | | class HashedDocKeyUpToHashComponentsExtractor : public rocksdb::FilterPolicy::KeyTransformer { |
1132 | | public: |
1133 | | HashedDocKeyUpToHashComponentsExtractor(const HashedDocKeyUpToHashComponentsExtractor&) = delete; |
1134 | | HashedDocKeyUpToHashComponentsExtractor& operator=( |
1135 | | const HashedDocKeyUpToHashComponentsExtractor&) = delete; |
1136 | | |
1137 | 10 | static HashedDocKeyUpToHashComponentsExtractor& GetInstance() { |
1138 | 10 | static HashedDocKeyUpToHashComponentsExtractor instance; |
1139 | 10 | return instance; |
1140 | 10 | } |
1141 | | |
1142 | | // For encoded DocKey with hash code present extracts prefix up to hashed components, |
1143 | | // for non-DocKey or DocKey without hash code (for range-partitioned tables) returns empty key, |
1144 | | // so they will always match the filter. |
1145 | 10 | Slice Transform(Slice key) const override { |
1146 | 10 | auto size_result = DocKey::EncodedSizeAndHashPresent(key, DocKeyPart::kUpToHash); |
1147 | 10 | return (size_result.ok() && size_result->second) ? Slice(key.data(), size_result->first) |
1148 | 10 | : Slice()0 ; |
1149 | 10 | } |
1150 | | |
1151 | | private: |
1152 | | HashedDocKeyUpToHashComponentsExtractor() = default; |
1153 | | }; |
1154 | | |
1155 | | } // namespace |
1156 | | |
1157 | | void DocDbAwareFilterPolicyBase::CreateFilter( |
1158 | 0 | const rocksdb::Slice* keys, int n, std::string* dst) const { |
1159 | 0 | CHECK_GT(n, 0); |
1160 | 0 | return builtin_policy_->CreateFilter(keys, n, dst); |
1161 | 0 | } |
1162 | | |
// Pure delegation to the wrapped built-in filter policy.
bool DocDbAwareFilterPolicyBase::KeyMayMatch(
    const rocksdb::Slice& key, const rocksdb::Slice& filter) const {
  return builtin_policy_->KeyMayMatch(key, filter);
}
1167 | | |
// Pure delegation to the wrapped built-in filter policy.
rocksdb::FilterBitsBuilder* DocDbAwareFilterPolicyBase::GetFilterBitsBuilder() const {
  return builtin_policy_->GetFilterBitsBuilder();
}
1171 | | |
// Pure delegation to the wrapped built-in filter policy.
rocksdb::FilterBitsReader* DocDbAwareFilterPolicyBase::GetFilterBitsReader(
    const rocksdb::Slice& contents) const {
  return builtin_policy_->GetFilterBitsReader(contents);
}
1176 | | |
// Pure delegation to the wrapped built-in filter policy.
rocksdb::FilterPolicy::FilterType DocDbAwareFilterPolicyBase::GetFilterType() const {
  return builtin_policy_->GetFilterType();
}
1180 | | |
1181 | | const rocksdb::FilterPolicy::KeyTransformer* |
1182 | 0 | DocDbAwareHashedComponentsFilterPolicy::GetKeyTransformer() const { |
1183 | 0 | return &DocKeyComponentsExtractor<DocKeyPart::kUpToHash>::GetInstance(); |
1184 | 0 | } |
1185 | | |
1186 | | const rocksdb::FilterPolicy::KeyTransformer* |
1187 | 10 | DocDbAwareV2FilterPolicy::GetKeyTransformer() const { |
1188 | | // We want for DocDbAwareV2FilterPolicy to disable bloom filtering during read path for |
1189 | | // range-partitioned tablets (see https://github.com/yugabyte/yugabyte-db/issues/6435, |
1190 | | // https://github.com/yugabyte/yugabyte-db/issues/8731). |
1191 | 10 | return &HashedDocKeyUpToHashComponentsExtractor::GetInstance(); |
1192 | 10 | } |
1193 | | |
1194 | | const rocksdb::FilterPolicy::KeyTransformer* |
1195 | 15.1k | DocDbAwareV3FilterPolicy::GetKeyTransformer() const { |
1196 | 15.1k | return &DocKeyComponentsExtractor<DocKeyPart::kUpToHashOrFirstRange>::GetInstance(); |
1197 | 15.1k | } |
1198 | | |
1199 | 31.0M | DocKeyEncoderAfterTableIdStep DocKeyEncoder::CotableId(const Uuid& cotable_id) { |
1200 | 31.0M | if (!cotable_id.IsNil()) { |
1201 | 12.7M | std::string bytes; |
1202 | 12.7M | cotable_id.EncodeToComparable(&bytes); |
1203 | 12.7M | out_->AppendValueType(ValueType::kTableId); |
1204 | 12.7M | out_->AppendRawBytes(bytes); |
1205 | 12.7M | } |
1206 | 31.0M | return DocKeyEncoderAfterTableIdStep(out_); |
1207 | 31.0M | } |
1208 | | |
1209 | 40.0M | DocKeyEncoderAfterTableIdStep DocKeyEncoder::ColocationId(const yb::ColocationId colocation_id) { |
1210 | 40.0M | if (colocation_id != kColocationIdNotSet) { |
1211 | 3.88k | out_->AppendValueType(ValueType::kColocationId); |
1212 | 3.88k | out_->AppendUInt32(colocation_id); |
1213 | 3.88k | } |
1214 | 40.0M | return DocKeyEncoderAfterTableIdStep(out_); |
1215 | 40.0M | } |
1216 | | |
1217 | 6.48M | DocKeyEncoderAfterTableIdStep DocKeyEncoder::Schema(const class Schema& schema) { |
1218 | 6.48M | if (schema.colocation_id() != kColocationIdNotSet) { |
1219 | 302 | return ColocationId(schema.colocation_id()); |
1220 | 6.48M | } else { |
1221 | 6.48M | return CotableId(schema.cotable_id()); |
1222 | 6.48M | } |
1223 | 6.48M | } |
1224 | | |
// Consumes an optional cotable-id prefix (kTableId byte followed by a 16-byte
// comparable-encoded UUID). Returns true if a prefix was consumed, false if
// none was present, and a Corruption status on a truncated id. `uuid` may be
// nullptr to skip over the id without materializing it.
Result<bool> DocKeyDecoder::DecodeCotableId(Uuid* uuid) {
  if (!input_.TryConsumeByte(ValueTypeAsChar::kTableId)) {
    return false;
  }

  if (input_.size() < kUuidSize) {
    return STATUS_FORMAT(
        Corruption, "Not enough bytes for cotable id: $0", input_.ToDebugHexString());
  }

  if (uuid) {
    RETURN_NOT_OK(uuid->DecodeFromComparableSlice(Slice(input_.data(), kUuidSize)));
  }
  // Advance past the UUID bytes whether or not the caller wanted them.
  input_.remove_prefix(kUuidSize);

  return true;
}
1242 | | |
// Consumes an optional colocation-id prefix (kColocationId byte followed by a
// big-endian uint32). Returns true if a prefix was consumed, false if none was
// present, and a Corruption status on a truncated id. `colocation_id` may be
// nullptr to skip over the value without materializing it.
Result<bool> DocKeyDecoder::DecodeColocationId(ColocationId* colocation_id) {
  if (input_.empty() || input_[0] != ValueTypeAsChar::kColocationId) {
    return false;
  }

  input_.consume_byte();

  if (input_.size() < sizeof(ColocationId)) {
    return STATUS_FORMAT(
        Corruption, "Not enough bytes for a colocation id: $0", input_.ToDebugHexString());
  }

  static_assert(
      sizeof(ColocationId) == sizeof(uint32_t),
      "It looks like the colocation ID's size has changed -- need to update encoder/decoder.");
  if (colocation_id) {
    *colocation_id = BigEndian::Load32(input_.data());
  }
  // Advance past the id bytes whether or not the caller wanted them.
  input_.remove_prefix(sizeof(ColocationId));

  return true;
}
1265 | | |
// Consumes an optional 16-bit hash-code prefix (kUInt16Hash byte followed by a
// big-endian uint16). Returns true if a hash code was consumed, false when the
// key has no hash code (e.g. range-partitioned tables or an immediate group
// end), and a Corruption status for malformed input. `out` may be nullptr to
// skip the value. With allow_special set, special value types (kLowest /
// kHighest) are also accepted as valid first bytes.
Result<bool> DocKeyDecoder::DecodeHashCode(uint16_t* out, AllowSpecial allow_special) {
  if (input_.empty()) {
    return false;
  }

  auto first_value_type = static_cast<ValueType>(input_[0]);

  // Validity of the first byte is computed before the kGroupEnd check so that
  // kGroupEnd itself returns false (no hash code) rather than an error.
  auto good_value_type = allow_special ? IsPrimitiveOrSpecialValueType(first_value_type)
                                       : IsPrimitiveValueType(first_value_type);
  if (first_value_type == ValueType::kGroupEnd) {
    return false;
  }

  if (!good_value_type) {
    return STATUS_FORMAT(Corruption,
        "Expected first value type to be primitive or GroupEnd, got $0 in $1",
        first_value_type, input_.ToDebugHexString());
  }

  // A valid primitive value type that is not kUInt16Hash simply means the key
  // has no hash code; that is not an error.
  if (input_.empty() || input_[0] != ValueTypeAsChar::kUInt16Hash) {
    return false;
  }

  if (input_.size() < sizeof(DocKeyHash) + 1) {
    return STATUS_FORMAT(
        Corruption,
        "Could not decode a 16-bit hash component of a document key: only $0 bytes left",
        input_.size());
  }

  // We'll need to update this code if we ever change the size of the hash field.
  static_assert(sizeof(DocKeyHash) == sizeof(uint16_t),
      "It looks like the DocKeyHash's size has changed -- need to update encoder/decoder.");
  if (out) {
    *out = BigEndian::Load16(input_.data() + 1);
  }
  // Skip the kUInt16Hash byte plus the two hash bytes.
  input_.remove_prefix(sizeof(DocKeyHash) + 1);
  return true;
}
1305 | | |
// Convenience overload: decode and discard one primitive value.
Status DocKeyDecoder::DecodePrimitiveValue(AllowSpecial allow_special) {
  return DecodePrimitiveValue(nullptr /* out */, allow_special);
}
1309 | | |
1310 | 266M | Status DocKeyDecoder::DecodePrimitiveValue(PrimitiveValue* out, AllowSpecial allow_special) { |
1311 | 266M | if (allow_special && |
1312 | 266M | !input_.empty()36.8M && |
1313 | 266M | (36.8M input_[0] == ValueTypeAsChar::kLowest36.8M || input_[0] == ValueTypeAsChar::kHighest36.7M )) { |
1314 | 0 | input_.consume_byte(); |
1315 | 0 | return Status::OK(); |
1316 | 0 | } |
1317 | 266M | return PrimitiveValue::DecodeKey(&input_, out); |
1318 | 266M | } |
1319 | | |
1320 | 95.2M | Status DocKeyDecoder::ConsumeGroupEnd() { |
1321 | 95.2M | if (input_.empty() || input_[0] != ValueTypeAsChar::kGroupEnd95.1M ) { |
1322 | 0 | return STATUS_FORMAT(Corruption, "Group end expected but $0 found", input_.ToDebugHexString()); |
1323 | 0 | } |
1324 | 95.2M | input_.consume_byte(); |
1325 | 95.2M | return Status::OK(); |
1326 | 95.2M | } |
1327 | | |
1328 | 150M | bool DocKeyDecoder::GroupEnded() const { |
1329 | 150M | return input_.empty() || input_[0] == ValueTypeAsChar::kGroupEnd150M ; |
1330 | 150M | } |
1331 | | |
// Delegates to the file-local helper, disallowing special value types.
Result<bool> DocKeyDecoder::HasPrimitiveValue() {
  return docdb::HasPrimitiveValue(&input_, AllowSpecial::kFalse);
}
1335 | | |
// Advances the decoder past the id prefixes, the hash code, and all hashed
// components, leaving the input positioned at the start of the range group.
// For keys without a hash code, nothing beyond the id prefixes is consumed.
Status DocKeyDecoder::DecodeToRangeGroup() {
  RETURN_NOT_OK(DecodeCotableId());
  RETURN_NOT_OK(DecodeColocationId());
  if (VERIFY_RESULT(DecodeHashCode())) {
    // Consume every hashed component up to the group end.
    while (VERIFY_RESULT(HasPrimitiveValue())) {
      RETURN_NOT_OK(DecodePrimitiveValue());
    }
  }

  return Status::OK();
}
1347 | | |
// Convenience overload: decode and discard the hash code if present.
Result<bool> DocKeyDecoder::DecodeHashCode(AllowSpecial allow_special) {
  return DecodeHashCode(nullptr /* out */, allow_special);
}
1351 | | |
// Truncates the encoded doc key in `out` so that only the prefix up to (and
// including) the hashed components remains, followed by a single group-end
// byte. Returns false if the key was already in that form (no change made),
// true if the buffer was modified.
Result<bool> ClearRangeComponents(KeyBytes* out, AllowSpecial allow_special) {
  auto prefix_size = VERIFY_RESULT(
      DocKey::EncodedSize(out->AsSlice(), DocKeyPart::kUpToHash, allow_special));
  auto& str = *out->mutable_data();
  // Already exactly "prefix + group end": nothing to do.
  if (str.size() == prefix_size + 1 && str[prefix_size] == ValueTypeAsChar::kGroupEnd) {
    return false;
  }
  if (str.size() > prefix_size) {
    // Overwrite the byte after the prefix with a group end and drop the rest.
    str[prefix_size] = ValueTypeAsChar::kGroupEnd;
    str.Truncate(prefix_size + 1);
  } else {
    // Buffer ends exactly at the prefix; just append the group end.
    str.PushBack(ValueTypeAsChar::kGroupEnd);
  }
  return true;
}
1367 | | |
// Compares two encoded doc keys on their id prefixes, hash code, and either
// all hashed components (hash-partitioned keys) or just the first range
// component (range-partitioned keys). Returns true when those parts are
// byte-identical; differences in the remaining range components are ignored.
Result<bool> HashedOrFirstRangeComponentsEqual(const Slice& lhs, const Slice& rhs) {
  DocKeyDecoder lhs_decoder(lhs);
  DocKeyDecoder rhs_decoder(rhs);
  // Skip any cotable-id / colocation-id prefixes on both keys.
  RETURN_NOT_OK(lhs_decoder.DecodeCotableId());
  RETURN_NOT_OK(rhs_decoder.DecodeCotableId());
  RETURN_NOT_OK(lhs_decoder.DecodeColocationId());
  RETURN_NOT_OK(rhs_decoder.DecodeColocationId());

  const bool hash_present = VERIFY_RESULT(lhs_decoder.DecodeHashCode(AllowSpecial::kTrue));
  if (hash_present != VERIFY_RESULT(rhs_decoder.DecodeHashCode(AllowSpecial::kTrue))) {
    // One key has a hash code and the other does not: they cannot match.
    return false;
  }

  // Compare the consumed prefixes (ids plus optional hash code) byte-wise.
  size_t consumed = lhs_decoder.ConsumedSizeFrom(lhs.data());
  if (consumed != rhs_decoder.ConsumedSizeFrom(rhs.data()) ||
      !strings::memeq(lhs.data(), rhs.data(), consumed)) {
    return false;
  }

  // Check all hashed components if present or first range component otherwise.
  int num_components_to_check = hash_present ? kNumValuesNoLimit : 1;

  while (!lhs_decoder.GroupEnded() && num_components_to_check > 0) {
    auto lhs_start = lhs_decoder.left_input().data();
    auto rhs_start = rhs_decoder.left_input().data();
    auto value_type = lhs_start[0];
    if (rhs_decoder.GroupEnded() || rhs_start[0] != value_type) {
      return false;
    }

    if (PREDICT_FALSE(!IsPrimitiveOrSpecialValueType(static_cast<ValueType>(value_type)))) {
      return false;
    }

    // Decode one component on each side, then compare the encoded bytes that
    // were consumed for it.
    RETURN_NOT_OK(lhs_decoder.DecodePrimitiveValue(AllowSpecial::kTrue));
    RETURN_NOT_OK(rhs_decoder.DecodePrimitiveValue(AllowSpecial::kTrue));
    consumed = lhs_decoder.ConsumedSizeFrom(lhs_start);
    if (consumed != rhs_decoder.ConsumedSizeFrom(rhs_start) ||
        !strings::memeq(lhs_start, rhs_start, consumed)) {
      return false;
    }
    --num_components_to_check;
  }
  if (num_components_to_check == 0) {
    // We don't care about difference in rest of range components.
    return true;
  }

  // lhs ran out of components; equal only if rhs ran out at the same point.
  return rhs_decoder.GroupEnded();
}
1418 | | |
1419 | 93.8M | bool DocKeyBelongsTo(Slice doc_key, const Schema& schema) { |
1420 | 93.8M | bool has_table_id = !doc_key.empty() && |
1421 | 93.8M | (doc_key[0] == ValueTypeAsChar::kTableId || |
1422 | 93.8M | doc_key[0] == ValueTypeAsChar::kColocationId49.8M ); |
1423 | | |
1424 | 93.8M | if (schema.cotable_id().IsNil() && schema.colocation_id() == kColocationIdNotSet49.7M ) { |
1425 | 49.7M | return !has_table_id; |
1426 | 49.7M | } |
1427 | | |
1428 | 44.0M | if (!has_table_id) { |
1429 | 0 | return false; |
1430 | 0 | } |
1431 | | |
1432 | 44.0M | if (doc_key[0] == ValueTypeAsChar::kTableId) { |
1433 | 43.9M | doc_key.consume_byte(); |
1434 | | |
1435 | 43.9M | uint8_t bytes[kUuidSize]; |
1436 | 43.9M | schema.cotable_id().EncodeToComparable(bytes); |
1437 | 43.9M | return doc_key.starts_with(Slice(bytes, kUuidSize)); |
1438 | 43.9M | } else { |
1439 | 125k | DCHECK(doc_key[0] == ValueTypeAsChar::kColocationId); |
1440 | 125k | doc_key.consume_byte(); |
1441 | 125k | char buf[sizeof(ColocationId)]; |
1442 | 125k | BigEndian::Store32(buf, schema.colocation_id()); |
1443 | 125k | return doc_key.starts_with(Slice(buf, sizeof(ColocationId))); |
1444 | 125k | } |
1445 | 44.0M | } |
1446 | | |
1447 | 21.5M | Result<boost::optional<DocKeyHash>> DecodeDocKeyHash(const Slice& encoded_key) { |
1448 | 21.5M | DocKey key; |
1449 | 21.5M | RETURN_NOT_OK(key.DecodeFrom(encoded_key, DocKeyPart::kUpToHashCode)); |
1450 | 21.5M | return key.has_hash() ? key.hash()16.8M : boost::optional<DocKeyHash>()4.63M ; |
1451 | 21.5M | } |
1452 | | |
1453 | | } // namespace docdb |
1454 | | } // namespace yb |