/Users/deen/code/yugabyte-db/src/yb/util/hdr_histogram.cc
Line | Count | Source (jump to first uncovered line) |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | // |
18 | | // The following only applies to changes made to this file as part of YugaByte development. |
19 | | // |
20 | | // Portions Copyright (c) YugaByte, Inc. |
21 | | // |
22 | | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except |
23 | | // in compliance with the License. You may obtain a copy of the License at |
24 | | // |
25 | | // http://www.apache.org/licenses/LICENSE-2.0 |
26 | | // |
27 | | // Unless required by applicable law or agreed to in writing, software distributed under the License |
28 | | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express |
29 | | // or implied. See the License for the specific language governing permissions and limitations |
30 | | // under the License. |
31 | | // |
32 | | #include "yb/util/hdr_histogram.h" |
33 | | |
34 | | #include <math.h> |
35 | | |
36 | | #include <limits> |
37 | | |
38 | | #include "yb/gutil/atomicops.h" |
39 | | #include "yb/gutil/bits.h" |
40 | | #include "yb/gutil/strings/substitute.h" |
41 | | |
42 | | #include "yb/util/status.h" |
43 | | |
44 | | using base::subtle::Atomic64; |
45 | | using base::subtle::NoBarrier_AtomicIncrement; |
46 | | using base::subtle::NoBarrier_Store; |
47 | | using base::subtle::NoBarrier_Load; |
48 | | using base::subtle::NoBarrier_CompareAndSwap; |
49 | | using strings::Substitute; |
50 | | using std::endl; |
51 | | |
52 | | namespace yb { |
53 | | |
// Constructs an empty histogram able to track values in [1, highest_trackable_value]
// with num_significant_digits decimal digits of precision.
// All counters start at zero; min starts at the Atomic64 max sentinel so that
// the first recorded value always becomes the minimum.
HdrHistogram::HdrHistogram(uint64_t highest_trackable_value, int num_significant_digits)
  : highest_trackable_value_(highest_trackable_value),
    num_significant_digits_(num_significant_digits),
    counts_array_length_(0),
    bucket_count_(0),
    sub_bucket_count_(0),
    sub_bucket_half_count_magnitude_(0),
    sub_bucket_half_count_(0),
    sub_bucket_mask_(0),
    total_count_(0),
    total_sum_(0),
    current_count_(0),
    current_sum_(0),
    min_value_(std::numeric_limits<Atomic64>::max()),
    max_value_(0),
    counts_(nullptr) {
  // Init() validates the parameters and sizes/allocates the counts array.
  Init();
}
72 | | |
// Copy constructor. The source histogram may be concurrently updated, so this
// is NOT a consistent snapshot; the copy order below (sums/min first, counts,
// then max and totals) keeps the result roughly coherent.
HdrHistogram::HdrHistogram(const HdrHistogram& other)
  : highest_trackable_value_(other.highest_trackable_value_),
    num_significant_digits_(other.num_significant_digits_),
    counts_array_length_(0),
    bucket_count_(0),
    sub_bucket_count_(0),
    sub_bucket_half_count_magnitude_(0),
    sub_bucket_half_count_(0),
    sub_bucket_mask_(0),
    total_count_(0),
    total_sum_(0),
    current_count_(0),
    current_sum_(0),
    min_value_(std::numeric_limits<Atomic64>::max()),
    max_value_(0),
    counts_(nullptr) {
  Init();

  // Not a consistent snapshot but we try to roughly keep it close.
  // Copy the sum and min first.
  NoBarrier_Store(&total_sum_, NoBarrier_Load(&other.total_sum_));
  NoBarrier_Store(&current_sum_, NoBarrier_Load(&other.current_sum_));
  NoBarrier_Store(&min_value_, NoBarrier_Load(&other.min_value_));

  uint64_t total_copied_count = 0;
  // Copy the counts in order of ascending magnitude.
  for (int i = 0; i < counts_array_length_; i++) {
    uint64_t count = NoBarrier_Load(&other.counts_[i]);
    NoBarrier_Store(&counts_[i], count);
    total_copied_count += count;
  }
  // Copy the max observed value last.
  NoBarrier_Store(&max_value_, NoBarrier_Load(&other.max_value_));
  // We must ensure the total is consistent with the copied counts.
  NoBarrier_Store(&total_count_, NoBarrier_Load(&other.total_count_));
  // current_count_ is derived from what we actually copied, so the copy's
  // bucket counts and current_count_ agree with each other.
  NoBarrier_Store(&current_count_, total_copied_count);
}
110 | | |
111 | 5.33M | void HdrHistogram::ResetPercentiles() { |
112 | 4.33G | for (int i = 0; i < counts_array_length_; i++4.32G ) { |
113 | 4.32G | NoBarrier_Store(&counts_[i], 0); |
114 | 4.32G | } |
115 | 5.33M | NoBarrier_Store(¤t_count_, 0); |
116 | 5.33M | NoBarrier_Store(¤t_sum_, 0); |
117 | | |
118 | 5.33M | NoBarrier_Store(&min_value_, std::numeric_limits<Atomic64>::max()); |
119 | 5.33M | NoBarrier_Store(&max_value_, 0); |
120 | 5.33M | } |
121 | | |
122 | 16.5M | bool HdrHistogram::IsValidHighestTrackableValue(uint64_t highest_trackable_value) { |
123 | 16.5M | return highest_trackable_value >= kMinHighestTrackableValue; |
124 | 16.5M | } |
125 | | |
126 | 16.5M | bool HdrHistogram::IsValidNumSignificantDigits(int num_significant_digits) { |
127 | 16.5M | return num_significant_digits >= kMinValidNumSignificantDigits && |
128 | 16.5M | num_significant_digits <= kMaxValidNumSignificantDigits; |
129 | 16.5M | } |
130 | | |
131 | 9.22M | void HdrHistogram::Init() { |
132 | | // Verify parameter validity |
133 | 9.22M | CHECK(IsValidHighestTrackableValue(highest_trackable_value_)) << |
134 | 2 | Substitute("highest_trackable_value must be >= $0", kMinHighestTrackableValue); |
135 | 9.22M | CHECK(IsValidNumSignificantDigits(num_significant_digits_)) << |
136 | 2 | Substitute("num_significant_digits must be between $0 and $1", |
137 | 2 | kMinValidNumSignificantDigits, kMaxValidNumSignificantDigits); |
138 | | |
139 | 9.22M | uint32_t largest_value_with_single_unit_resolution = |
140 | 9.22M | 2 * static_cast<uint32_t>(pow(10.0, num_significant_digits_)); |
141 | | |
142 | | // We need to maintain power-of-two sub_bucket_count_ (for clean direct |
143 | | // indexing) that is large enough to provide unit resolution to at least |
144 | | // largest_value_with_single_unit_resolution. So figure out |
145 | | // largest_value_with_single_unit_resolution's nearest power-of-two |
146 | | // (rounded up), and use that: |
147 | | |
148 | | // The sub-buckets take care of the precision. |
149 | | // Each sub-bucket is sized to have enough bits for the requested |
150 | | // 10^precision accuracy. |
151 | 9.22M | int sub_bucket_count_magnitude = |
152 | 9.22M | Bits::Log2Ceiling(largest_value_with_single_unit_resolution); |
153 | 9.22M | sub_bucket_half_count_magnitude_ = |
154 | 18.4E | (sub_bucket_count_magnitude >= 1)9.22M ? sub_bucket_count_magnitude - 19.22M : 0; |
155 | | |
156 | | // sub_bucket_count_ is approx. 10^num_sig_digits (as a power of 2) |
157 | 9.22M | sub_bucket_count_ = pow(2.0, sub_bucket_half_count_magnitude_ + 1); |
158 | 9.22M | sub_bucket_mask_ = sub_bucket_count_ - 1; |
159 | 9.22M | sub_bucket_half_count_ = sub_bucket_count_ / 2; |
160 | | |
161 | | // The buckets take care of the magnitude. |
162 | | // Determine exponent range needed to support the trackable value with no |
163 | | // overflow: |
164 | 9.22M | uint64_t trackable_value = sub_bucket_count_ - 1; |
165 | 9.22M | int buckets_needed = 1; |
166 | 89.1M | while (trackable_value < highest_trackable_value_) { |
167 | 79.9M | trackable_value <<= 1; |
168 | 79.9M | buckets_needed++; |
169 | 79.9M | } |
170 | 9.22M | bucket_count_ = buckets_needed; |
171 | | |
172 | 9.22M | counts_array_length_ = (bucket_count_ + 1) * sub_bucket_half_count_; |
173 | 9.22M | counts_.reset(new Atomic64[counts_array_length_]()); // value-initialized |
174 | 9.22M | } |
175 | | |
// Records a single occurrence of `value`. Convenience wrapper over IncrementBy.
void HdrHistogram::Increment(int64_t value) {
  IncrementBy(value, 1);
}
179 | | |
// Records `count` occurrences of `value`.
// Lock-free: bucket counters, totals and sums are bumped with no-barrier
// atomic increments, and min/max are maintained with CAS retry loops, so
// concurrent readers may observe a momentarily inconsistent view.
void HdrHistogram::IncrementBy(int64_t value, int64_t count) {
  DCHECK_GE(value, 0);
  DCHECK_GE(count, 0);

  // Dissect the value into bucket and sub-bucket parts, and derive index into
  // counts array:
  int bucket_index = BucketIndex(value);
  int sub_bucket_index = SubBucketIndex(value, bucket_index);
  int counts_index = CountsArrayIndex(bucket_index, sub_bucket_index);

  // Increment bucket, total, and sum.
  NoBarrier_AtomicIncrement(&counts_[counts_index], count);
  NoBarrier_AtomicIncrement(&total_count_, count);
  NoBarrier_AtomicIncrement(&current_count_, count);
  NoBarrier_AtomicIncrement(&total_sum_, value * count);
  NoBarrier_AtomicIncrement(&current_sum_, value * count);

  // Update min, if needed: retry the CAS until we either install `value`
  // or observe a competing min that is no larger than ours.
  {
    Atomic64 min_val;
    while (PREDICT_FALSE(value < (min_val = MinValue()))) {
      Atomic64 old_val = NoBarrier_CompareAndSwap(&min_value_, min_val, value);
      if (PREDICT_TRUE(old_val == min_val)) break; // CAS success.
    }
  }

  // Update max, if needed: same CAS-retry scheme as the min above.
  {
    Atomic64 max_val;
    while (PREDICT_FALSE(value > (max_val = MaxValue()))) {
      Atomic64 old_val = NoBarrier_CompareAndSwap(&max_value_, max_val, value);
      if (PREDICT_TRUE(old_val == max_val)) break; // CAS success.
    }
  }
}
215 | | |
216 | | void HdrHistogram::IncrementWithExpectedInterval(int64_t value, |
217 | 100 | int64_t expected_interval_between_samples) { |
218 | 100 | Increment(value); |
219 | 100 | if (expected_interval_between_samples <= 0) { |
220 | 0 | return; |
221 | 0 | } |
222 | 100 | for (int64_t missing_value = value - expected_interval_between_samples; |
223 | 190 | missing_value >= expected_interval_between_samples; |
224 | 100 | missing_value -= expected_interval_between_samples90 ) { |
225 | 90 | Increment(missing_value); |
226 | 90 | } |
227 | 100 | } |
228 | | |
229 | | //////////////////////////////////// |
230 | | |
231 | 690M | int HdrHistogram::BucketIndex(uint64_t value) const { |
232 | 690M | if (PREDICT_FALSE(value > highest_trackable_value_)) { |
233 | 463M | value = highest_trackable_value_; |
234 | 463M | } |
235 | | // Here we are calculating the power-of-2 magnitude of the value with a |
236 | | // correction for precision in the first bucket. |
237 | | // Smallest power of 2 containing value. |
238 | 690M | int pow2ceiling = Bits::Log2Ceiling64(value | sub_bucket_mask_); |
239 | 690M | return pow2ceiling - (sub_bucket_half_count_magnitude_ + 1); |
240 | 690M | } |
241 | | |
242 | 689M | int HdrHistogram::SubBucketIndex(uint64_t value, int bucket_index) const { |
243 | 689M | if (PREDICT_FALSE(value > highest_trackable_value_)) { |
244 | 464M | value = highest_trackable_value_; |
245 | 464M | } |
246 | | // We hack off the magnitude and are left with only the relevant precision |
247 | | // portion, which gives us a direct index into the sub-bucket. TODO: Right?? |
248 | 689M | return static_cast<int>(value >> bucket_index); |
249 | 689M | } |
250 | | |
251 | 767M | int HdrHistogram::CountsArrayIndex(int bucket_index, int sub_bucket_index) const { |
252 | 767M | DCHECK(sub_bucket_index < sub_bucket_count_); |
253 | 767M | DCHECK(bucket_index < bucket_count_); |
254 | 767M | DCHECK(bucket_index == 0 || (sub_bucket_index >= sub_bucket_half_count_)); |
255 | | // Calculate the index for the first entry in the bucket: |
256 | | // (The following is the equivalent of ((bucket_index + 1) * sub_bucket_half_count_) ): |
257 | 767M | int bucket_base_index = (bucket_index + 1) << sub_bucket_half_count_magnitude_; |
258 | | // Calculate the offset in the bucket: |
259 | 767M | int offset_in_bucket = sub_bucket_index - sub_bucket_half_count_; |
260 | 767M | return bucket_base_index + offset_in_bucket; |
261 | 767M | } |
262 | | |
263 | 77.0M | uint64_t HdrHistogram::CountAt(int bucket_index, int sub_bucket_index) const { |
264 | 77.0M | return counts_[CountsArrayIndex(bucket_index, sub_bucket_index)]; |
265 | 77.0M | } |
266 | | |
267 | 19 | uint64_t HdrHistogram::CountInBucketForValue(uint64_t value) const { |
268 | 19 | int bucket_index = BucketIndex(value); |
269 | 19 | int sub_bucket_index = SubBucketIndex(value, bucket_index); |
270 | 19 | return CountAt(bucket_index, sub_bucket_index); |
271 | 19 | } |
272 | | |
// Inverse of the (bucket, sub-bucket) decomposition: reconstructs the lowest
// value that maps to this slot.
uint64_t HdrHistogram::ValueFromIndex(int bucket_index, int sub_bucket_index) {
  return static_cast<uint64_t>(sub_bucket_index) << bucket_index;
}
276 | | |
277 | | //////////////////////////////////// |
278 | | |
279 | 201 | uint64_t HdrHistogram::SizeOfEquivalentValueRange(uint64_t value) const { |
280 | 201 | int bucket_index = BucketIndex(value); |
281 | 201 | int sub_bucket_index = SubBucketIndex(value, bucket_index); |
282 | 201 | uint64_t distance_to_next_value = |
283 | 201 | (1 << ((sub_bucket_index >= sub_bucket_count_) ? (bucket_index + 1)0 : bucket_index)); |
284 | 201 | return distance_to_next_value; |
285 | 201 | } |
286 | | |
287 | 207 | uint64_t HdrHistogram::LowestEquivalentValue(uint64_t value) const { |
288 | 207 | int bucket_index = BucketIndex(value); |
289 | 207 | int sub_bucket_index = SubBucketIndex(value, bucket_index); |
290 | 207 | uint64_t this_value_base_level = ValueFromIndex(bucket_index, sub_bucket_index); |
291 | 207 | return this_value_base_level; |
292 | 207 | } |
293 | | |
// Returns the largest value that maps to the same histogram slot as `value`
// (one below the first value of the next slot).
uint64_t HdrHistogram::HighestEquivalentValue(uint64_t value) const {
  return NextNonEquivalentValue(value) - 1;
}
297 | | |
298 | 101 | uint64_t HdrHistogram::MedianEquivalentValue(uint64_t value) const { |
299 | 101 | return (LowestEquivalentValue(value) + (SizeOfEquivalentValueRange(value) >> 1)); |
300 | 101 | } |
301 | | |
302 | 100 | uint64_t HdrHistogram::NextNonEquivalentValue(uint64_t value) const { |
303 | 100 | return LowestEquivalentValue(value) + SizeOfEquivalentValueRange(value); |
304 | 100 | } |
305 | | |
306 | 0 | bool HdrHistogram::ValuesAreEquivalent(uint64_t value1, uint64_t value2) const { |
307 | 0 | return (LowestEquivalentValue(value1) == LowestEquivalentValue(value2)); |
308 | 0 | } |
309 | | |
310 | 694M | uint64_t HdrHistogram::MinValue() const { |
311 | 694M | if (PREDICT_FALSE(CurrentCount() == 0)) { |
312 | 5.15M | return 0; |
313 | 5.15M | } |
314 | 689M | return NoBarrier_Load(&min_value_); |
315 | 694M | } |
316 | | |
317 | 694M | uint64_t HdrHistogram::MaxValue() const { |
318 | 694M | if (PREDICT_FALSE(CurrentCount() == 0)) { |
319 | 5.16M | return 0; |
320 | 5.16M | } |
321 | 689M | return NoBarrier_Load(&max_value_); |
322 | 694M | } |
323 | | |
324 | 5.33M | double HdrHistogram::MeanValue() const { |
325 | 5.33M | uint64_t count = CurrentCount(); |
326 | 5.33M | if (PREDICT_FALSE(count == 0)) return 0.05.16M ; |
327 | 170k | return static_cast<double>(CurrentSum()) / count; |
328 | 5.33M | } |
329 | | |
// Returns the recorded value at the given percentile (0-100) of the current
// interval, or 0 if nothing was recorded. Walks the buckets in ascending
// value order accumulating counts until the target rank is reached.
uint64_t HdrHistogram::ValueAtPercentile(double percentile) const {
  uint64_t count = CurrentCount();
  if (PREDICT_FALSE(count == 0)) return 0;

  double requested_percentile = std::min(percentile, 100.0); // Truncate down to 100%
  uint64_t count_at_percentile =
      static_cast<uint64_t>(((requested_percentile / 100.0) * count) + 0.5); // Round
  // Make sure we at least reach the first recorded entry
  count_at_percentile = std::max(count_at_percentile, static_cast<uint64_t>(1));

  uint64_t total_to_current_iJ = 0;
  for (int i = 0; i < bucket_count_; i++) {
    // Bucket 0 owns all its sub-buckets; later buckets only use the top half
    // (the bottom half aliases the previous bucket's range).
    int j = (i == 0) ? 0 : (sub_bucket_count_ / 2);
    for (; j < sub_bucket_count_; j++) {
      total_to_current_iJ += CountAt(i, j);
      if (total_to_current_iJ >= count_at_percentile) {
        uint64_t valueAtIndex = ValueFromIndex(i, j);
        return valueAtIndex;
      }
    }
  }

  // Can only fall through here if counts changed under us mid-iteration.
  LOG(DFATAL) << "Fell through while iterating, likely concurrent modification of histogram";
  return 0;
}
355 | | |
// Writes a human-readable summary (totals, mean, standard percentiles) to
// `out`. Intended for debugging / log output only.
void HdrHistogram::DumpHumanReadable(std::ostream* out) const {
  *out << "Total Count: " << TotalCount() << endl;
  *out << "Mean: " << MeanValue() << endl;
  *out << "Percentiles:" << endl;
  *out << "CountInBuckets: " << CurrentCount() << endl;
  *out << "  0%  (min) = " << MinValue() << endl;
  *out << "  25%       = " << ValueAtPercentile(25) << endl;
  *out << "  50%  (med) = " << ValueAtPercentile(50) << endl;
  *out << "  75%       = " << ValueAtPercentile(75) << endl;
  *out << "  95%       = " << ValueAtPercentile(95) << endl;
  *out << "  99%       = " << ValueAtPercentile(99) << endl;
  *out << "  99.9%     = " << ValueAtPercentile(99.9) << endl;
  *out << "  99.99%    = " << ValueAtPercentile(99.99) << endl;
  *out << "  100% (max) = " << MaxValue() << endl;
  // Clamped samples make the upper percentiles unreliable; call it out.
  if (MaxValue() >= highest_trackable_value()) {
    *out << "*NOTE: some values were greater than highest trackable value" << endl;
  }
}
374 | | |
375 | | /////////////////////////////////////////////////////////////////////// |
376 | | // AbstractHistogramIterator |
377 | | /////////////////////////////////////////////////////////////////////// |
378 | | |
// Base iterator over a histogram's recorded slots. Snapshots the current
// count up front; traversal later fails fast if the histogram is modified
// concurrently (see Next()).
AbstractHistogramIterator::AbstractHistogramIterator(const HdrHistogram* histogram)
  : histogram_(CHECK_NOTNULL(histogram)),
    cur_iter_val_(),
    histogram_total_count_(histogram_->CurrentCount()),
    current_bucket_index_(0),
    current_sub_bucket_index_(0),
    current_value_at_index_(0),
    // "next" starts one sub-bucket ahead of "current".
    next_bucket_index_(0),
    next_sub_bucket_index_(1),
    next_value_at_index_(1),
    prev_value_iterated_to_(0),
    total_count_to_prev_index_(0),
    total_count_to_current_index_(0),
    total_value_to_current_index_(0),
    count_at_this_value_(0),
    fresh_sub_bucket_(true) {
}
396 | | |
// True while some recorded counts have not yet been consumed by Next().
bool AbstractHistogramIterator::HasNext() const {
  return total_count_to_current_index_ < histogram_total_count_;
}
400 | | |
// Advances to the next reporting level (as defined by the concrete subclass
// via ReachedIterationLevel/IncrementIterationLevel) and fills in `value`.
// Returns IllegalState if the histogram was modified since construction or
// if iteration runs off the end of the counts array.
Status AbstractHistogramIterator::Next(HistogramIterationValue* value) {
  if (histogram_->CurrentCount() != histogram_total_count_) {
    return STATUS(IllegalState, "Concurrently modified histogram while traversing it");
  }

  // Move through the sub buckets and buckets until we hit the next reporting level:
  while (!ExhaustedSubBuckets()) {
    count_at_this_value_ =
        histogram_->CountAt(current_bucket_index_, current_sub_bucket_index_);
    if (fresh_sub_bucket_) { // Don't add unless we've incremented since last bucket...
      total_count_to_current_index_ += count_at_this_value_;
      // Attribute this slot's counts at the slot's median value.
      total_value_to_current_index_ +=
          count_at_this_value_ * histogram_->MedianEquivalentValue(current_value_at_index_);
      fresh_sub_bucket_ = false;
    }
    if (ReachedIterationLevel()) {
      uint64_t value_iterated_to = ValueIteratedTo();

      // Update iterator value.
      cur_iter_val_.value_iterated_to = value_iterated_to;
      cur_iter_val_.value_iterated_from = prev_value_iterated_to_;
      cur_iter_val_.count_at_value_iterated_to = count_at_this_value_;
      cur_iter_val_.count_added_in_this_iteration_step =
          (total_count_to_current_index_ - total_count_to_prev_index_);
      cur_iter_val_.total_count_to_this_value = total_count_to_current_index_;
      cur_iter_val_.total_value_to_this_value = total_value_to_current_index_;
      cur_iter_val_.percentile =
          ((100.0 * total_count_to_current_index_) / histogram_total_count_);
      cur_iter_val_.percentile_level_iterated_to = PercentileIteratedTo();

      prev_value_iterated_to_ = value_iterated_to;
      total_count_to_prev_index_ = total_count_to_current_index_;
      // Move the next percentile reporting level forward.
      IncrementIterationLevel();

      *value = cur_iter_val_;
      return Status::OK();
    }
    IncrementSubBucket();
  }
  return STATUS(IllegalState, "Histogram array index out of bounds while traversing");
}
443 | | |
444 | 100 | double AbstractHistogramIterator::PercentileIteratedTo() const { |
445 | 100 | return (100.0 * static_cast<double>(total_count_to_current_index_)) / histogram_total_count_; |
446 | 100 | } |
447 | | |
448 | 0 | double AbstractHistogramIterator::PercentileIteratedFrom() const { |
449 | 0 | return (100.0 * static_cast<double>(total_count_to_prev_index_)) / histogram_total_count_; |
450 | 0 | } |
451 | | |
// Upper edge of the current slot's equivalent-value range — the value
// reported for this iteration step.
uint64_t AbstractHistogramIterator::ValueIteratedTo() const {
  return histogram_->HighestEquivalentValue(current_value_at_index_);
}
455 | | |
456 | 200 | bool AbstractHistogramIterator::ExhaustedSubBuckets() const { |
457 | 200 | return (current_bucket_index_ >= histogram_->bucket_count_); |
458 | 200 | } |
459 | | |
// Moves the cursor to the pre-computed "next" slot and computes the slot
// after that, rolling into the next bucket's upper half when the current
// bucket's sub-buckets are exhausted.
void AbstractHistogramIterator::IncrementSubBucket() {
  fresh_sub_bucket_ = true;
  // Take on the next index:
  current_bucket_index_ = next_bucket_index_;
  current_sub_bucket_index_ = next_sub_bucket_index_;
  current_value_at_index_ = next_value_at_index_;
  // Figure out the next next index:
  next_sub_bucket_index_++;
  if (next_sub_bucket_index_ >= histogram_->sub_bucket_count_) {
    // Later buckets only use their upper half of sub-buckets.
    next_sub_bucket_index_ = histogram_->sub_bucket_half_count_;
    next_bucket_index_++;
  }
  next_value_at_index_ = HdrHistogram::ValueFromIndex(next_bucket_index_, next_sub_bucket_index_);
}
474 | | |
475 | | /////////////////////////////////////////////////////////////////////// |
476 | | // RecordedValuesIterator |
477 | | /////////////////////////////////////////////////////////////////////// |
478 | | |
479 | | RecordedValuesIterator::RecordedValuesIterator(const HdrHistogram* histogram) |
480 | | : AbstractHistogramIterator(histogram), |
481 | | visited_sub_bucket_index_(-1), |
482 | 1 | visited_bucket_index_(-1) { |
483 | 1 | } |
484 | | |
// Marks the current slot as visited so ReachedIterationLevel() will not
// report it again.
void RecordedValuesIterator::IncrementIterationLevel() {
  visited_sub_bucket_index_ = current_sub_bucket_index_;
  visited_bucket_index_ = current_bucket_index_;
}
489 | | |
490 | 200 | bool RecordedValuesIterator::ReachedIterationLevel() const { |
491 | 200 | uint64_t current_ij_count = |
492 | 200 | histogram_->CountAt(current_bucket_index_, current_sub_bucket_index_); |
493 | 200 | return current_ij_count != 0 && |
494 | 200 | (199 (visited_sub_bucket_index_ != current_sub_bucket_index_)199 || |
495 | 199 | (visited_bucket_index_ != current_bucket_index_)99 ); |
496 | 200 | } |
497 | | |
498 | | /////////////////////////////////////////////////////////////////////// |
499 | | // PercentileIterator |
500 | | /////////////////////////////////////////////////////////////////////// |
501 | | |
// Iterator that reports at percentile levels whose spacing tightens as the
// percentile approaches 100 (percentile_ticks_per_half_distance steps per
// halving of the remaining distance to 100%).
PercentileIterator::PercentileIterator(const HdrHistogram* histogram,
                                       int percentile_ticks_per_half_distance)
  : AbstractHistogramIterator(histogram),
    percentile_ticks_per_half_distance_(percentile_ticks_per_half_distance),
    percentile_level_to_iterate_to_(0.0),
    percentile_level_to_iterate_from_(0.0),
    reached_last_recorded_value_(false) {
}
510 | | |
// Like the base HasNext(), plus one final synthetic step at exactly 100%.
// NOTE(review): mutating state from a const method via const_cast is a
// smell; the fields would be better declared mutable in the header — left
// as-is since the header is not part of this change.
bool PercentileIterator::HasNext() const {
  if (AbstractHistogramIterator::HasNext()) {
    return true;
  }
  // We want one additional last step to 100%
  if (!reached_last_recorded_value_ && (histogram_total_count_ > 0)) {
    const_cast<PercentileIterator*>(this)->percentile_level_to_iterate_to_ = 100.0;
    const_cast<PercentileIterator*>(this)->reached_last_recorded_value_ = true;
    return true;
  }
  return false;
}
523 | | |
// Reports the configured target level rather than the raw consumed-count
// percentile of the base class.
double PercentileIterator::PercentileIteratedTo() const {
  return percentile_level_to_iterate_to_;
}
527 | | |
528 | | |
// Target percentile level of the previous reporting step.
double PercentileIterator::PercentileIteratedFrom() const {
  return percentile_level_to_iterate_from_;
}
532 | | |
// Advances the target percentile. The tick count doubles every time the
// remaining distance to 100% halves, so steps get finer toward the tail
// (e.g. 90 -> 99 -> 99.9 each get percentile_ticks_per_half_distance_
// times-two-per-halving resolution).
void PercentileIterator::IncrementIterationLevel() {
  percentile_level_to_iterate_from_ = percentile_level_to_iterate_to_;
  // TODO: Can this expression be simplified?
  // 2^(floor(log2(100 / (100 - level))) + 1) scales the tick count by the
  // number of halvings of the remaining distance to 100%.
  uint64_t percentile_reporting_ticks = percentile_ticks_per_half_distance_ *
    static_cast<uint64_t>(pow(2.0,
      static_cast<int>(log(100.0 / (100.0 - (percentile_level_to_iterate_to_))) / log(2)) + 1));
  percentile_level_to_iterate_to_ += 100.0 / percentile_reporting_ticks;
}
541 | | |
542 | 0 | bool PercentileIterator::ReachedIterationLevel() const { |
543 | 0 | if (count_at_this_value_ == 0) return false; |
544 | 0 | double current_percentile = |
545 | 0 | (100.0 * static_cast<double>(total_count_to_current_index_)) / histogram_total_count_; |
546 | 0 | return (current_percentile >= percentile_level_to_iterate_to_); |
547 | 0 | } |
548 | | |
549 | | } // namespace yb |