/Users/deen/code/yugabyte-db/src/yb/rocksdb/util/histogram.h
Line | Count | Source (jump to first uncovered line) |
1 | | // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. |
2 | | // This source code is licensed under the BSD-style license found in the |
3 | | // LICENSE file in the root directory of this source tree. An additional grant |
4 | | // of patent rights can be found in the PATENTS file in the same directory. |
5 | | // |
6 | | // The following only applies to changes made to this file as part of YugaByte development. |
7 | | // |
8 | | // Portions Copyright (c) YugaByte, Inc. |
9 | | // |
10 | | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except |
11 | | // in compliance with the License. You may obtain a copy of the License at |
12 | | // |
13 | | // http://www.apache.org/licenses/LICENSE-2.0 |
14 | | // |
15 | | // Unless required by applicable law or agreed to in writing, software distributed under the License |
16 | | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express |
17 | | // or implied. See the License for the specific language governing permissions and limitations |
18 | | // under the License. |
19 | | // |
20 | | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. |
21 | | // Use of this source code is governed by a BSD-style license that can be |
22 | | // found in the LICENSE file. See the AUTHORS file for names of contributors. |
23 | | |
24 | | #pragma once |
25 | | #include "yb/rocksdb/statistics.h" |
26 | | |
27 | | #include <atomic> |
28 | | #include <cassert> |
29 | | #include <string> |
30 | | #include <vector> |
31 | | #include <map> |
32 | | |
33 | | #include <string.h> |
34 | | |
35 | | namespace rocksdb { |
36 | | |
37 | | static constexpr int kHistogramNumBuckets = 138; |
38 | | |
// Maps sample values onto a fixed set of histogram buckets with
// exponentially growing limits (see histogram.cc for construction).
class HistogramBucketMapper {
 public:
  HistogramBucketMapper();

  // Translates a sample value into the index of the bucket it falls in.
  size_t IndexForValue(const uint64_t value) const;

  // Number of buckets this mapper provides.
  size_t BucketCount() const { return bucketValues_.size(); }

  // The configured largest bucket value.
  uint64_t LastValue() const { return maxBucketValue_; }

  // The configured smallest bucket value.
  uint64_t FirstValue() const { return minBucketValue_; }

  // Limit of the bucket at index bucketNumber; the index must be valid.
  uint64_t BucketLimit(const size_t bucketNumber) const {
    assert(bucketNumber < BucketCount());
    return bucketValues_[bucketNumber];
  }

 private:
  // NOTE: keep declaration order — the constructor's initializer list is
  // executed in this order regardless of how it is written.
  const std::vector<uint64_t> bucketValues_;
  const uint64_t maxBucketValue_;
  const uint64_t minBucketValue_;
  std::map<uint64_t, uint64_t> valueIndexMap_;
};
72 | | |
73 | | class HistogramImpl { |
74 | | public: |
75 | 546k | HistogramImpl() { |
76 | | // Original comment from RocksDB: "this is BucketMapper:LastValue()". |
77 | | // TODO: is there a cleaner way to set it here? |
78 | 546k | set_min(1000000000); |
79 | 546k | set_max(0); |
80 | 546k | set_num(0); |
81 | 546k | set_sum(0); |
82 | 546k | set_sum_squares(0); |
83 | 75.9M | for (int i = 0; i < kHistogramNumBuckets; ++i75.4M ) { |
84 | 75.4M | set_bucket(i, 0); |
85 | 75.4M | } |
86 | 546k | } |
87 | | |
88 | 0 | HistogramImpl(const HistogramImpl& other) { |
89 | 0 | set_min(other.min()); |
90 | 0 | set_max(other.max()); |
91 | 0 | set_num(other.num()); |
92 | 0 | set_sum(other.sum()); |
93 | 0 | set_sum_squares(other.sum_squares()); |
94 | 0 | for (int i = 0; i < kHistogramNumBuckets; ++i) { |
95 | 0 | set_bucket(i, other.bucket(i)); |
96 | 0 | } |
97 | 0 | } |
98 | | |
99 | | virtual void Clear(); |
100 | | virtual bool Empty(); |
101 | | virtual void Add(uint64_t value); |
102 | | void Merge(const HistogramImpl& other); |
103 | | |
104 | | virtual std::string ToString() const; |
105 | | |
106 | | virtual double Median() const; |
107 | | virtual double Percentile(double p) const; |
108 | | virtual double Average() const; |
109 | | virtual double StandardDeviation() const; |
110 | | virtual void Data(HistogramData * const data) const; |
111 | | |
112 | 507k | virtual ~HistogramImpl() {} |
113 | | |
114 | | private: |
115 | | std::atomic<double> min_; |
116 | | std::atomic<double> max_; |
117 | | std::atomic<uint64_t> num_; |
118 | | std::atomic<double> sum_; |
119 | | std::atomic<double> sum_squares_; |
120 | | std::atomic<uint64_t> buckets_[kHistogramNumBuckets]; |
121 | | |
122 | | // We don't use any synchronization because that's what RocksDB has been doing, and we need to |
123 | | // measure the performance impact before using a stronger memory order. |
124 | | static constexpr auto kMemoryOrder = std::memory_order_relaxed; |
125 | | |
126 | 5.11M | double min() const { return min_.load(kMemoryOrder); } |
127 | | |
128 | 561k | void set_min(double new_min) { min_.store(new_min, kMemoryOrder); } |
129 | | |
130 | 5.08M | void update_min(double value) { |
131 | 5.08M | double previous_min = min(); |
132 | 5.08M | if (value < previous_min) { |
133 | | // This has a race condition. We need to do a compare-and-swap loop here to if we want a |
134 | | // precise statistic. |
135 | 14.5k | set_min(value); |
136 | 14.5k | } |
137 | 5.08M | } |
138 | | |
139 | 5.11M | double max() const { return max_.load(kMemoryOrder); } |
140 | | |
141 | 562k | void set_max(double new_max) { max_.store(new_max, kMemoryOrder); } |
142 | | |
143 | 5.08M | void update_max(double value) { |
144 | 5.08M | double previous_max = max(); |
145 | 5.08M | if (value > previous_max) { |
146 | 16.0k | set_max(value); |
147 | 16.0k | } |
148 | 5.08M | } |
149 | | |
150 | 3.89k | uint64_t num() const { return num_.load(kMemoryOrder); } |
151 | 5.08M | void add_to_num(uint64_t delta) { num_.fetch_add(delta, kMemoryOrder); } |
152 | 546k | void set_num(int64_t new_num) { num_.store(new_num, kMemoryOrder); } |
153 | | |
154 | 0 | double sum() const { return sum_.load(kMemoryOrder); } |
155 | 546k | void set_sum(double value) { sum_.store(value, kMemoryOrder); } |
156 | | |
157 | 5.08M | void add_to_sum(double delta) { |
158 | 5.08M | sum_.store(sum_.load(kMemoryOrder) + delta, kMemoryOrder); |
159 | 5.08M | } |
160 | | |
161 | 5.08M | void add_to_sum_squares(double delta) { |
162 | 5.08M | sum_squares_.store(sum_squares_.load(kMemoryOrder) + delta, kMemoryOrder); |
163 | 5.08M | } |
164 | | |
165 | 0 | double sum_squares() const { return sum_squares_.load(kMemoryOrder); } |
166 | 546k | void set_sum_squares(double value) { sum_squares_.store(value, kMemoryOrder); } |
167 | | |
168 | 560k | uint64_t bucket(int b) const { return buckets_[b].load(kMemoryOrder); } |
169 | 5.08M | void add_to_bucket(int b, uint64_t delta) { |
170 | 5.08M | buckets_[b].fetch_add(delta, kMemoryOrder); |
171 | 5.08M | } |
172 | 75.4M | void set_bucket(int b, uint64_t value) { |
173 | 75.4M | buckets_[b].store(value, kMemoryOrder); |
174 | 75.4M | } |
175 | | |
176 | | |
177 | | }; |
178 | | |
179 | | } // namespace rocksdb |