/Users/deen/code/yugabyte-db/src/yb/util/atomic.h
Line | Count | Source (jump to first uncovered line) |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | // |
18 | | // The following only applies to changes made to this file as part of YugaByte development. |
19 | | // |
20 | | // Portions Copyright (c) YugaByte, Inc. |
21 | | // |
22 | | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except |
23 | | // in compliance with the License. You may obtain a copy of the License at |
24 | | // |
25 | | // http://www.apache.org/licenses/LICENSE-2.0 |
26 | | // |
27 | | // Unless required by applicable law or agreed to in writing, software distributed under the License |
28 | | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express |
29 | | // or implied. See the License for the specific language governing permissions and limitations |
30 | | // under the License. |
31 | | // |
32 | | |
33 | | #ifndef YB_UTIL_ATOMIC_H |
34 | | #define YB_UTIL_ATOMIC_H |
35 | | |
36 | | #include <algorithm> |
37 | | #include <atomic> |
38 | | #include <thread> |
39 | | |
40 | | #include <boost/atomic.hpp> |
41 | | #include <boost/type_traits/make_signed.hpp> |
42 | | #include <glog/logging.h> |
43 | | |
44 | | #include "yb/gutil/atomicops.h" |
45 | | #include "yb/gutil/macros.h" |
46 | | #include "yb/gutil/port.h" |
47 | | |
48 | | #include "yb/util/cast.h" |
49 | | #include "yb/util/random_util.h" |
50 | | |
51 | | namespace yb { |
52 | | |
// Memory-ordering levels accepted by the AtomicInt/AtomicBool operations
// below. See the top-level comments in yb/gutil/atomicops.h for a fuller
// explanation of what each level guarantees.
enum MemoryOrder {
  // Relaxed ordering: no barriers of any kind are emitted.
  kMemOrderNoBarrier = 0,

  // Acquire semantics: later memory accesses by this thread cannot be
  // reordered ahead of the operation.
  kMemOrderAcquire = 1,

  // Release semantics: earlier memory accesses by this thread cannot be
  // reordered after the operation.
  kMemOrderRelease = 2,

  // Full barrier: no memory access by this thread, earlier or later, may
  // be reordered across the operation.
  kMemOrderBarrier = 3,
};
71 | | |
72 | | // Atomic integer class inspired by Impala's AtomicInt and |
73 | | // std::atomic<> in C++11. |
74 | | // |
75 | | // NOTE: All of public operations use an implicit memory order of |
76 | | // kMemOrderNoBarrier unless otherwise specified. |
77 | | // |
78 | | // Unlike std::atomic<>, overflowing an unsigned AtomicInt via Increment or |
79 | | // IncrementBy is undefined behavior (it is also undefined for signed types, |
80 | | // as always). |
81 | | // |
82 | | // See also: yb/gutil/atomicops.h |
83 | | template<typename T> |
84 | | class AtomicInt { |
85 | | public: |
86 | | // Initialize the underlying value to 'initial_value'. The |
87 | | // initialization performs a Store with 'kMemOrderNoBarrier'. |
88 | | explicit AtomicInt(T initial_value); |
89 | | |
90 | | // Returns the underlying value. |
91 | | // |
92 | | // Does not support 'kMemOrderBarrier'. |
93 | | T Load(MemoryOrder mem_order = kMemOrderNoBarrier) const; |
94 | | |
95 | | // Sets the underlying value to 'new_value'. |
96 | | // |
97 | | // Does not support 'kMemOrderBarrier'. |
98 | | void Store(T new_value, MemoryOrder mem_order = kMemOrderNoBarrier); |
99 | | |
100 | | // Iff the underlying value is equal to 'expected_val', sets the |
101 | | // underlying value to 'new_value' and returns true; returns false |
102 | | // otherwise. |
103 | | // |
104 | | // Does not support 'kMemOrderBarrier'. |
105 | | bool CompareAndSet(T expected_val, T new_value, MemoryOrder mem_order = kMemOrderNoBarrier); |
106 | | |
107 | | // Iff the underlying value is equal to 'expected_val', sets the |
108 | | // underlying value to 'new_value' and returns |
109 | | // 'expected_val'. Otherwise, returns the current underlying |
110 | | // value. |
111 | | // |
112 | | // Does not support 'kMemOrderBarrier'. |
113 | | T CompareAndSwap(T expected_val, T new_value, MemoryOrder mem_order = kMemOrderNoBarrier); |
114 | | |
115 | | // Sets the underlying value to 'new_value' iff 'new_value' is |
116 | | // greater than the current underlying value. |
117 | | // |
118 | | // Does not support 'kMemOrderBarrier'. |
119 | | void StoreMax(T new_value, MemoryOrder mem_order = kMemOrderNoBarrier); |
120 | | |
121 | | // Sets the underlying value to 'new_value' iff 'new_value' is less |
122 | | // than the current underlying value. |
123 | | // |
124 | | // Does not support 'kMemOrderBarrier'. |
125 | | void StoreMin(T new_value, MemoryOrder mem_order = kMemOrderNoBarrier); |
126 | | |
127 | | // Increments the underlying value by 1 and returns the new |
128 | | // underlying value. |
129 | | // |
130 | | // Does not support 'kMemOrderAcquire' or 'kMemOrderRelease'. |
131 | | T Increment(MemoryOrder mem_order = kMemOrderNoBarrier); |
132 | | |
133 | | // Increments the underlying value by 'delta' and returns the new |
134 | | // underlying value. |
135 | | |
136 | | // Does not support 'kKemOrderAcquire' or 'kMemOrderRelease'. |
137 | | T IncrementBy(T delta, MemoryOrder mem_order = kMemOrderNoBarrier); |
138 | | |
139 | | // Sets the underlying value to 'new_value' and returns the previous |
140 | | // underlying value. |
141 | | // |
142 | | // Does not support 'kMemOrderBarrier'. |
143 | | T Exchange(T new_value, MemoryOrder mem_order = kMemOrderNoBarrier); |
144 | | |
145 | | private: |
146 | | // If a method 'caller' doesn't support memory order described as |
147 | | // 'requested', exit by doing perform LOG(FATAL) logging the method |
148 | | // called, the requested memory order, and the supported memory |
149 | | // orders. |
150 | | static void FatalMemOrderNotSupported(const char* caller, |
151 | | const char* requested = "kMemOrderBarrier", |
152 | | const char* supported = |
153 | | "kMemNorderNoBarrier, kMemOrderAcquire, kMemOrderRelease"); |
154 | | |
155 | | // The gutil/atomicops.h functions only operate on signed types. |
156 | | // So, even if the user specializes on an unsigned type, we use a |
157 | | // signed type internally. |
158 | | typedef typename boost::make_signed<T>::type SignedT; |
159 | | SignedT value_; |
160 | | |
161 | | DISALLOW_COPY_AND_ASSIGN(AtomicInt); |
162 | | }; |
163 | | |
164 | | // Adapts AtomicInt to handle boolean values. |
165 | | // |
166 | | // NOTE: All of public operations use an implicit memory order of |
167 | | // kMemOrderNoBarrier unless otherwise specified. |
168 | | // |
169 | | // See AtomicInt above for documentation on individual methods. |
170 | | class AtomicBool { |
171 | | public: |
172 | | explicit AtomicBool(bool value); |
173 | | |
174 | 93.9M | bool Load(MemoryOrder m = kMemOrderNoBarrier) const { |
175 | 93.9M | return underlying_.Load(m); |
176 | 93.9M | } |
177 | 192k | void Store(bool n, MemoryOrder m = kMemOrderNoBarrier) { |
178 | 192k | underlying_.Store(static_cast<int32_t>(n), m); |
179 | 192k | } |
180 | 119k | bool CompareAndSet(bool e, bool n, MemoryOrder m = kMemOrderNoBarrier) { |
181 | 119k | return underlying_.CompareAndSet(static_cast<int32_t>(e), static_cast<int32_t>(n), m); |
182 | 119k | } |
183 | | bool CompareAndSwap(bool e, bool n, MemoryOrder m = kMemOrderNoBarrier) { |
184 | | return underlying_.CompareAndSwap(static_cast<int32_t>(e), static_cast<int32_t>(n), m); |
185 | | } |
186 | | bool Exchange(bool n, MemoryOrder m = kMemOrderNoBarrier) { |
187 | | return underlying_.Exchange(static_cast<int32_t>(n), m); |
188 | | } |
189 | | private: |
190 | | AtomicInt<int32_t> underlying_; |
191 | | |
192 | | DISALLOW_COPY_AND_ASSIGN(AtomicBool); |
193 | | }; |
194 | | |
195 | | template<typename T> |
196 | 1.26G | inline T AtomicInt<T>::Load(MemoryOrder mem_order) const { |
197 | 1.26G | switch (mem_order) { |
198 | 1.14G | case kMemOrderNoBarrier: { |
199 | 1.14G | return base::subtle::NoBarrier_Load(&value_); |
200 | 0 | } |
201 | 0 | case kMemOrderBarrier: { |
202 | 0 | FatalMemOrderNotSupported("Load"); |
203 | 0 | break; |
204 | 0 | } |
205 | 151k | case kMemOrderAcquire: { |
206 | 151k | return base::subtle::Acquire_Load(&value_); |
207 | 0 | } |
208 | 121M | case kMemOrderRelease: { |
209 | 121M | return base::subtle::Release_Load(&value_); |
210 | 0 | } |
211 | 1.26G | } |
212 | 0 | abort(); // Unnecessary, but avoids gcc complaining. |
213 | 1.26G | } yb::AtomicInt<int>::Load(yb::MemoryOrder) const Line | Count | Source | 196 | 93.9M | inline T AtomicInt<T>::Load(MemoryOrder mem_order) const { | 197 | 93.9M | switch (mem_order) { | 198 | 93.8M | case kMemOrderNoBarrier: { | 199 | 93.8M | return base::subtle::NoBarrier_Load(&value_); | 200 | 0 | } | 201 | 0 | case kMemOrderBarrier: { | 202 | 0 | FatalMemOrderNotSupported("Load"); | 203 | 0 | break; | 204 | 0 | } | 205 | 151k | case kMemOrderAcquire: { | 206 | 151k | return base::subtle::Acquire_Load(&value_); | 207 | 0 | } | 208 | 23 | case kMemOrderRelease: { | 209 | 23 | return base::subtle::Release_Load(&value_); | 210 | 0 | } | 211 | 93.9M | } | 212 | 0 | abort(); // Unnecessary, but avoids gcc complaining. | 213 | 93.9M | } |
yb::AtomicInt<long long>::Load(yb::MemoryOrder) const Line | Count | Source | 196 | 1.17G | inline T AtomicInt<T>::Load(MemoryOrder mem_order) const { | 197 | 1.17G | switch (mem_order) { | 198 | 1.04G | case kMemOrderNoBarrier: { | 199 | 1.04G | return base::subtle::NoBarrier_Load(&value_); | 200 | 0 | } | 201 | 0 | case kMemOrderBarrier: { | 202 | 0 | FatalMemOrderNotSupported("Load"); | 203 | 0 | break; | 204 | 0 | } | 205 | 18 | case kMemOrderAcquire: { | 206 | 18 | return base::subtle::Acquire_Load(&value_); | 207 | 0 | } | 208 | 121M | case kMemOrderRelease: { | 209 | 121M | return base::subtle::Release_Load(&value_); | 210 | 0 | } | 211 | 1.17G | } | 212 | 0 | abort(); // Unnecessary, but avoids gcc complaining. | 213 | 1.17G | } |
yb::AtomicInt<unsigned long long>::Load(yb::MemoryOrder) const Line | Count | Source | 196 | 2.45M | inline T AtomicInt<T>::Load(MemoryOrder mem_order) const { | 197 | 2.45M | switch (mem_order) { | 198 | 2.45M | case kMemOrderNoBarrier: { | 199 | 2.45M | return base::subtle::NoBarrier_Load(&value_); | 200 | 0 | } | 201 | 0 | case kMemOrderBarrier: { | 202 | 0 | FatalMemOrderNotSupported("Load"); | 203 | 0 | break; | 204 | 0 | } | 205 | 18 | case kMemOrderAcquire: { | 206 | 18 | return base::subtle::Acquire_Load(&value_); | 207 | 0 | } | 208 | 18 | case kMemOrderRelease: { | 209 | 18 | return base::subtle::Release_Load(&value_); | 210 | 0 | } | 211 | 2.45M | } | 212 | 0 | abort(); // Unnecessary, but avoids gcc complaining. | 213 | 2.45M | } |
yb::AtomicInt<unsigned int>::Load(yb::MemoryOrder) const Line | Count | Source | 196 | 54 | inline T AtomicInt<T>::Load(MemoryOrder mem_order) const { | 197 | 54 | switch (mem_order) { | 198 | 18 | case kMemOrderNoBarrier: { | 199 | 18 | return base::subtle::NoBarrier_Load(&value_); | 200 | 0 | } | 201 | 0 | case kMemOrderBarrier: { | 202 | 0 | FatalMemOrderNotSupported("Load"); | 203 | 0 | break; | 204 | 0 | } | 205 | 18 | case kMemOrderAcquire: { | 206 | 18 | return base::subtle::Acquire_Load(&value_); | 207 | 0 | } | 208 | 18 | case kMemOrderRelease: { | 209 | 18 | return base::subtle::Release_Load(&value_); | 210 | 0 | } | 211 | 54 | } | 212 | 0 | abort(); // Unnecessary, but avoids gcc complaining. | 213 | 54 | } |
|
214 | | |
215 | | template<typename T> |
216 | 217M | inline void AtomicInt<T>::Store(T new_value, MemoryOrder mem_order) { |
217 | 217M | switch (mem_order) { |
218 | 217M | case kMemOrderNoBarrier: { |
219 | 217M | base::subtle::NoBarrier_Store(&value_, new_value); |
220 | 217M | break; |
221 | 0 | } |
222 | 0 | case kMemOrderBarrier: { |
223 | 0 | FatalMemOrderNotSupported("Store"); |
224 | 0 | break; |
225 | 0 | } |
226 | 13 | case kMemOrderAcquire: { |
227 | 13 | base::subtle::Acquire_Store(&value_, new_value); |
228 | 13 | break; |
229 | 0 | } |
230 | 75.6k | case kMemOrderRelease: { |
231 | 75.6k | base::subtle::Release_Store(&value_, new_value); |
232 | 75.6k | break; |
233 | 0 | } |
234 | 217M | } |
235 | 217M | } yb::AtomicInt<long long>::Store(long long, yb::MemoryOrder) Line | Count | Source | 216 | 204M | inline void AtomicInt<T>::Store(T new_value, MemoryOrder mem_order) { | 217 | 204M | switch (mem_order) { | 218 | 204M | case kMemOrderNoBarrier: { | 219 | 204M | base::subtle::NoBarrier_Store(&value_, new_value); | 220 | 204M | break; | 221 | 0 | } | 222 | 0 | case kMemOrderBarrier: { | 223 | 0 | FatalMemOrderNotSupported("Store"); | 224 | 0 | break; | 225 | 0 | } | 226 | 3 | case kMemOrderAcquire: { | 227 | 3 | base::subtle::Acquire_Store(&value_, new_value); | 228 | 3 | break; | 229 | 0 | } | 230 | 3 | case kMemOrderRelease: { | 231 | 3 | base::subtle::Release_Store(&value_, new_value); | 232 | 3 | break; | 233 | 0 | } | 234 | 204M | } | 235 | 204M | } |
yb::AtomicInt<int>::Store(int, yb::MemoryOrder) Line | Count | Source | 216 | 12.3M | inline void AtomicInt<T>::Store(T new_value, MemoryOrder mem_order) { | 217 | 12.3M | switch (mem_order) { | 218 | 12.2M | case kMemOrderNoBarrier: { | 219 | 12.2M | base::subtle::NoBarrier_Store(&value_, new_value); | 220 | 12.2M | break; | 221 | 0 | } | 222 | 0 | case kMemOrderBarrier: { | 223 | 0 | FatalMemOrderNotSupported("Store"); | 224 | 0 | break; | 225 | 0 | } | 226 | 4 | case kMemOrderAcquire: { | 227 | 4 | base::subtle::Acquire_Store(&value_, new_value); | 228 | 4 | break; | 229 | 0 | } | 230 | 75.6k | case kMemOrderRelease: { | 231 | 75.6k | base::subtle::Release_Store(&value_, new_value); | 232 | 75.6k | break; | 233 | 0 | } | 234 | 12.3M | } | 235 | 12.3M | } |
yb::AtomicInt<unsigned int>::Store(unsigned int, yb::MemoryOrder) Line | Count | Source | 216 | 20 | inline void AtomicInt<T>::Store(T new_value, MemoryOrder mem_order) { | 217 | 20 | switch (mem_order) { | 218 | 14 | case kMemOrderNoBarrier: { | 219 | 14 | base::subtle::NoBarrier_Store(&value_, new_value); | 220 | 14 | break; | 221 | 0 | } | 222 | 0 | case kMemOrderBarrier: { | 223 | 0 | FatalMemOrderNotSupported("Store"); | 224 | 0 | break; | 225 | 0 | } | 226 | 3 | case kMemOrderAcquire: { | 227 | 3 | base::subtle::Acquire_Store(&value_, new_value); | 228 | 3 | break; | 229 | 0 | } | 230 | 3 | case kMemOrderRelease: { | 231 | 3 | base::subtle::Release_Store(&value_, new_value); | 232 | 3 | break; | 233 | 0 | } | 234 | 20 | } | 235 | 20 | } |
yb::AtomicInt<unsigned long long>::Store(unsigned long long, yb::MemoryOrder) Line | Count | Source | 216 | 46.7k | inline void AtomicInt<T>::Store(T new_value, MemoryOrder mem_order) { | 217 | 46.7k | switch (mem_order) { | 218 | 46.7k | case kMemOrderNoBarrier: { | 219 | 46.7k | base::subtle::NoBarrier_Store(&value_, new_value); | 220 | 46.7k | break; | 221 | 0 | } | 222 | 0 | case kMemOrderBarrier: { | 223 | 0 | FatalMemOrderNotSupported("Store"); | 224 | 0 | break; | 225 | 0 | } | 226 | 3 | case kMemOrderAcquire: { | 227 | 3 | base::subtle::Acquire_Store(&value_, new_value); | 228 | 3 | break; | 229 | 0 | } | 230 | 3 | case kMemOrderRelease: { | 231 | 3 | base::subtle::Release_Store(&value_, new_value); | 232 | 3 | break; | 233 | 0 | } | 234 | 46.7k | } | 235 | 46.7k | } |
|
236 | | |
237 | | template<typename T> |
238 | 999M | inline bool AtomicInt<T>::CompareAndSet(T expected_val, T new_val, MemoryOrder mem_order) { |
239 | 999M | return CompareAndSwap(expected_val, new_val, mem_order) == expected_val; |
240 | 999M | } yb::AtomicInt<int>::CompareAndSet(int, int, yb::MemoryOrder) Line | Count | Source | 238 | 119k | inline bool AtomicInt<T>::CompareAndSet(T expected_val, T new_val, MemoryOrder mem_order) { | 239 | 119k | return CompareAndSwap(expected_val, new_val, mem_order) == expected_val; | 240 | 119k | } |
yb::AtomicInt<long long>::CompareAndSet(long long, long long, yb::MemoryOrder) Line | Count | Source | 238 | 999M | inline bool AtomicInt<T>::CompareAndSet(T expected_val, T new_val, MemoryOrder mem_order) { | 239 | 999M | return CompareAndSwap(expected_val, new_val, mem_order) == expected_val; | 240 | 999M | } |
yb::AtomicInt<unsigned int>::CompareAndSet(unsigned int, unsigned int, yb::MemoryOrder) Line | Count | Source | 238 | 6 | inline bool AtomicInt<T>::CompareAndSet(T expected_val, T new_val, MemoryOrder mem_order) { | 239 | 6 | return CompareAndSwap(expected_val, new_val, mem_order) == expected_val; | 240 | 6 | } |
yb::AtomicInt<unsigned long long>::CompareAndSet(unsigned long long, unsigned long long, yb::MemoryOrder) Line | Count | Source | 238 | 6 | inline bool AtomicInt<T>::CompareAndSet(T expected_val, T new_val, MemoryOrder mem_order) { | 239 | 6 | return CompareAndSwap(expected_val, new_val, mem_order) == expected_val; | 240 | 6 | } |
|
241 | | |
242 | | template<typename T> |
243 | 1.02G | inline T AtomicInt<T>::CompareAndSwap(T expected_val, T new_val, MemoryOrder mem_order) { |
244 | 1.02G | switch (mem_order) { |
245 | 1.02G | case kMemOrderNoBarrier: { |
246 | 1.02G | return base::subtle::NoBarrier_CompareAndSwap( |
247 | 1.02G | &value_, expected_val, new_val); |
248 | 0 | } |
249 | 0 | case kMemOrderBarrier: { |
250 | 0 | FatalMemOrderNotSupported("CompareAndSwap/CompareAndSet"); |
251 | 0 | break; |
252 | 0 | } |
253 | 47 | case kMemOrderAcquire: { |
254 | 47 | return base::subtle::Acquire_CompareAndSwap( |
255 | 47 | &value_, expected_val, new_val); |
256 | 0 | } |
257 | 47 | case kMemOrderRelease: { |
258 | 47 | return base::subtle::Release_CompareAndSwap( |
259 | 47 | &value_, expected_val, new_val); |
260 | 0 | } |
261 | 1.02G | } |
262 | 0 | abort(); |
263 | 1.02G | } yb::AtomicInt<unsigned long long>::CompareAndSwap(unsigned long long, unsigned long long, yb::MemoryOrder) Line | Count | Source | 243 | 2.45M | inline T AtomicInt<T>::CompareAndSwap(T expected_val, T new_val, MemoryOrder mem_order) { | 244 | 2.45M | switch (mem_order) { | 245 | 2.45M | case kMemOrderNoBarrier: { | 246 | 2.45M | return base::subtle::NoBarrier_CompareAndSwap( | 247 | 2.45M | &value_, expected_val, new_val); | 248 | 0 | } | 249 | 0 | case kMemOrderBarrier: { | 250 | 0 | FatalMemOrderNotSupported("CompareAndSwap/CompareAndSet"); | 251 | 0 | break; | 252 | 0 | } | 253 | 11 | case kMemOrderAcquire: { | 254 | 11 | return base::subtle::Acquire_CompareAndSwap( | 255 | 11 | &value_, expected_val, new_val); | 256 | 0 | } | 257 | 11 | case kMemOrderRelease: { | 258 | 11 | return base::subtle::Release_CompareAndSwap( | 259 | 11 | &value_, expected_val, new_val); | 260 | 0 | } | 261 | 2.45M | } | 262 | 0 | abort(); | 263 | 2.45M | } |
yb::AtomicInt<int>::CompareAndSwap(int, int, yb::MemoryOrder) Line | Count | Source | 243 | 119k | inline T AtomicInt<T>::CompareAndSwap(T expected_val, T new_val, MemoryOrder mem_order) { | 244 | 119k | switch (mem_order) { | 245 | 119k | case kMemOrderNoBarrier: { | 246 | 119k | return base::subtle::NoBarrier_CompareAndSwap( | 247 | 119k | &value_, expected_val, new_val); | 248 | 0 | } | 249 | 0 | case kMemOrderBarrier: { | 250 | 0 | FatalMemOrderNotSupported("CompareAndSwap/CompareAndSet"); | 251 | 0 | break; | 252 | 0 | } | 253 | 14 | case kMemOrderAcquire: { | 254 | 14 | return base::subtle::Acquire_CompareAndSwap( | 255 | 14 | &value_, expected_val, new_val); | 256 | 0 | } | 257 | 14 | case kMemOrderRelease: { | 258 | 14 | return base::subtle::Release_CompareAndSwap( | 259 | 14 | &value_, expected_val, new_val); | 260 | 0 | } | 261 | 119k | } | 262 | 0 | abort(); | 263 | 119k | } |
yb::AtomicInt<long long>::CompareAndSwap(long long, long long, yb::MemoryOrder) Line | Count | Source | 243 | 1.02G | inline T AtomicInt<T>::CompareAndSwap(T expected_val, T new_val, MemoryOrder mem_order) { | 244 | 1.02G | switch (mem_order) { | 245 | 1.02G | case kMemOrderNoBarrier: { | 246 | 1.02G | return base::subtle::NoBarrier_CompareAndSwap( | 247 | 1.02G | &value_, expected_val, new_val); | 248 | 0 | } | 249 | 0 | case kMemOrderBarrier: { | 250 | 0 | FatalMemOrderNotSupported("CompareAndSwap/CompareAndSet"); | 251 | 0 | break; | 252 | 0 | } | 253 | 11 | case kMemOrderAcquire: { | 254 | 11 | return base::subtle::Acquire_CompareAndSwap( | 255 | 11 | &value_, expected_val, new_val); | 256 | 0 | } | 257 | 11 | case kMemOrderRelease: { | 258 | 11 | return base::subtle::Release_CompareAndSwap( | 259 | 11 | &value_, expected_val, new_val); | 260 | 0 | } | 261 | 1.02G | } | 262 | 0 | abort(); | 263 | 1.02G | } |
yb::AtomicInt<unsigned int>::CompareAndSwap(unsigned int, unsigned int, yb::MemoryOrder) Line | Count | Source | 243 | 33 | inline T AtomicInt<T>::CompareAndSwap(T expected_val, T new_val, MemoryOrder mem_order) { | 244 | 33 | switch (mem_order) { | 245 | 11 | case kMemOrderNoBarrier: { | 246 | 11 | return base::subtle::NoBarrier_CompareAndSwap( | 247 | 11 | &value_, expected_val, new_val); | 248 | 0 | } | 249 | 0 | case kMemOrderBarrier: { | 250 | 0 | FatalMemOrderNotSupported("CompareAndSwap/CompareAndSet"); | 251 | 0 | break; | 252 | 0 | } | 253 | 11 | case kMemOrderAcquire: { | 254 | 11 | return base::subtle::Acquire_CompareAndSwap( | 255 | 11 | &value_, expected_val, new_val); | 256 | 0 | } | 257 | 11 | case kMemOrderRelease: { | 258 | 11 | return base::subtle::Release_CompareAndSwap( | 259 | 11 | &value_, expected_val, new_val); | 260 | 0 | } | 261 | 33 | } | 262 | 0 | abort(); | 263 | 33 | } |
|
264 | | |
265 | | |
266 | | template<typename T> |
267 | 4.91M | inline T AtomicInt<T>::Increment(MemoryOrder mem_order) { |
268 | 4.91M | return IncrementBy(1, mem_order); |
269 | 4.91M | } yb::AtomicInt<int>::Increment(yb::MemoryOrder) Line | Count | Source | 267 | 17.3k | inline T AtomicInt<T>::Increment(MemoryOrder mem_order) { | 268 | 17.3k | return IncrementBy(1, mem_order); | 269 | 17.3k | } |
yb::AtomicInt<long long>::Increment(yb::MemoryOrder) Line | Count | Source | 267 | 4.83M | inline T AtomicInt<T>::Increment(MemoryOrder mem_order) { | 268 | 4.83M | return IncrementBy(1, mem_order); | 269 | 4.83M | } |
yb::AtomicInt<unsigned int>::Increment(yb::MemoryOrder) Line | Count | Source | 267 | 2 | inline T AtomicInt<T>::Increment(MemoryOrder mem_order) { | 268 | 2 | return IncrementBy(1, mem_order); | 269 | 2 | } |
yb::AtomicInt<unsigned long long>::Increment(yb::MemoryOrder) Line | Count | Source | 267 | 58.1k | inline T AtomicInt<T>::Increment(MemoryOrder mem_order) { | 268 | 58.1k | return IncrementBy(1, mem_order); | 269 | 58.1k | } |
|
270 | | |
271 | | template<typename T> |
272 | 6.43G | inline T AtomicInt<T>::IncrementBy(T delta, MemoryOrder mem_order) { |
273 | 6.43G | switch (mem_order) { |
274 | 6.43G | case kMemOrderNoBarrier: { |
275 | 6.43G | return base::subtle::NoBarrier_AtomicIncrement(&value_, delta); |
276 | 0 | } |
277 | 12 | case kMemOrderBarrier: { |
278 | 12 | return base::subtle::Barrier_AtomicIncrement(&value_, delta); |
279 | 0 | } |
280 | 0 | case kMemOrderAcquire: { |
281 | 0 | FatalMemOrderNotSupported("Increment/IncrementBy", |
282 | 0 | "kMemOrderAcquire", |
283 | 0 | "kMemOrderNoBarrier and kMemOrderBarrier"); |
284 | 0 | break; |
285 | 0 | } |
286 | 0 | case kMemOrderRelease: { |
287 | 0 | FatalMemOrderNotSupported("Increment/Incrementby", |
288 | 0 | "kMemOrderAcquire", |
289 | 0 | "kMemOrderNoBarrier and kMemOrderBarrier"); |
290 | 0 | break; |
291 | 0 | } |
292 | 6.43G | } |
293 | 0 | abort(); |
294 | 6.43G | } yb::AtomicInt<int>::IncrementBy(int, yb::MemoryOrder) Line | Count | Source | 272 | 18.7k | inline T AtomicInt<T>::IncrementBy(T delta, MemoryOrder mem_order) { | 273 | 18.7k | switch (mem_order) { | 274 | 18.7k | case kMemOrderNoBarrier: { | 275 | 18.7k | return base::subtle::NoBarrier_AtomicIncrement(&value_, delta); | 276 | 0 | } | 277 | 3 | case kMemOrderBarrier: { | 278 | 3 | return base::subtle::Barrier_AtomicIncrement(&value_, delta); | 279 | 0 | } | 280 | 0 | case kMemOrderAcquire: { | 281 | 0 | FatalMemOrderNotSupported("Increment/IncrementBy", | 282 | 0 | "kMemOrderAcquire", | 283 | 0 | "kMemOrderNoBarrier and kMemOrderBarrier"); | 284 | 0 | break; | 285 | 0 | } | 286 | 0 | case kMemOrderRelease: { | 287 | 0 | FatalMemOrderNotSupported("Increment/Incrementby", | 288 | 0 | "kMemOrderAcquire", | 289 | 0 | "kMemOrderNoBarrier and kMemOrderBarrier"); | 290 | 0 | break; | 291 | 0 | } | 292 | 18.7k | } | 293 | 0 | abort(); | 294 | 18.7k | } |
yb::AtomicInt<long long>::IncrementBy(long long, yb::MemoryOrder) Line | Count | Source | 272 | 6.43G | inline T AtomicInt<T>::IncrementBy(T delta, MemoryOrder mem_order) { | 273 | 6.43G | switch (mem_order) { | 274 | 6.43G | case kMemOrderNoBarrier: { | 275 | 6.43G | return base::subtle::NoBarrier_AtomicIncrement(&value_, delta); | 276 | 0 | } | 277 | 3 | case kMemOrderBarrier: { | 278 | 3 | return base::subtle::Barrier_AtomicIncrement(&value_, delta); | 279 | 0 | } | 280 | 0 | case kMemOrderAcquire: { | 281 | 0 | FatalMemOrderNotSupported("Increment/IncrementBy", | 282 | 0 | "kMemOrderAcquire", | 283 | 0 | "kMemOrderNoBarrier and kMemOrderBarrier"); | 284 | 0 | break; | 285 | 0 | } | 286 | 0 | case kMemOrderRelease: { | 287 | 0 | FatalMemOrderNotSupported("Increment/Incrementby", | 288 | 0 | "kMemOrderAcquire", | 289 | 0 | "kMemOrderNoBarrier and kMemOrderBarrier"); | 290 | 0 | break; | 291 | 0 | } | 292 | 6.43G | } | 293 | 0 | abort(); | 294 | 6.43G | } |
yb::AtomicInt<unsigned int>::IncrementBy(unsigned int, yb::MemoryOrder) Line | Count | Source | 272 | 6 | inline T AtomicInt<T>::IncrementBy(T delta, MemoryOrder mem_order) { | 273 | 6 | switch (mem_order) { | 274 | 3 | case kMemOrderNoBarrier: { | 275 | 3 | return base::subtle::NoBarrier_AtomicIncrement(&value_, delta); | 276 | 0 | } | 277 | 3 | case kMemOrderBarrier: { | 278 | 3 | return base::subtle::Barrier_AtomicIncrement(&value_, delta); | 279 | 0 | } | 280 | 0 | case kMemOrderAcquire: { | 281 | 0 | FatalMemOrderNotSupported("Increment/IncrementBy", | 282 | 0 | "kMemOrderAcquire", | 283 | 0 | "kMemOrderNoBarrier and kMemOrderBarrier"); | 284 | 0 | break; | 285 | 0 | } | 286 | 0 | case kMemOrderRelease: { | 287 | 0 | FatalMemOrderNotSupported("Increment/Incrementby", | 288 | 0 | "kMemOrderAcquire", | 289 | 0 | "kMemOrderNoBarrier and kMemOrderBarrier"); | 290 | 0 | break; | 291 | 0 | } | 292 | 6 | } | 293 | 0 | abort(); | 294 | 6 | } |
yb::AtomicInt<unsigned long long>::IncrementBy(unsigned long long, yb::MemoryOrder) Line | Count | Source | 272 | 58.1k | inline T AtomicInt<T>::IncrementBy(T delta, MemoryOrder mem_order) { | 273 | 58.1k | switch (mem_order) { | 274 | 58.1k | case kMemOrderNoBarrier: { | 275 | 58.1k | return base::subtle::NoBarrier_AtomicIncrement(&value_, delta); | 276 | 0 | } | 277 | 3 | case kMemOrderBarrier: { | 278 | 3 | return base::subtle::Barrier_AtomicIncrement(&value_, delta); | 279 | 0 | } | 280 | 0 | case kMemOrderAcquire: { | 281 | 0 | FatalMemOrderNotSupported("Increment/IncrementBy", | 282 | 0 | "kMemOrderAcquire", | 283 | 0 | "kMemOrderNoBarrier and kMemOrderBarrier"); | 284 | 0 | break; | 285 | 0 | } | 286 | 0 | case kMemOrderRelease: { | 287 | 0 | FatalMemOrderNotSupported("Increment/Incrementby", | 288 | 0 | "kMemOrderAcquire", | 289 | 0 | "kMemOrderNoBarrier and kMemOrderBarrier"); | 290 | 0 | break; | 291 | 0 | } | 292 | 58.1k | } | 293 | 0 | abort(); | 294 | 58.1k | } |
|
295 | | |
296 | | template<typename T> |
297 | 17 | inline T AtomicInt<T>::Exchange(T new_value, MemoryOrder mem_order) { |
298 | 17 | switch (mem_order) { |
299 | 7 | case kMemOrderNoBarrier: { |
300 | 7 | return base::subtle::NoBarrier_AtomicExchange(&value_, new_value); |
301 | 0 | } |
302 | 0 | case kMemOrderBarrier: { |
303 | 0 | FatalMemOrderNotSupported("Exchange"); |
304 | 0 | break; |
305 | 0 | } |
306 | 5 | case kMemOrderAcquire: { |
307 | 5 | return base::subtle::Acquire_AtomicExchange(&value_, new_value); |
308 | 0 | } |
309 | 5 | case kMemOrderRelease: { |
310 | 5 | return base::subtle::Release_AtomicExchange(&value_, new_value); |
311 | 0 | } |
312 | 17 | } |
313 | 0 | abort(); |
314 | 17 | } yb::AtomicInt<int>::Exchange(int, yb::MemoryOrder) Line | Count | Source | 297 | 6 | inline T AtomicInt<T>::Exchange(T new_value, MemoryOrder mem_order) { | 298 | 6 | switch (mem_order) { | 299 | 2 | case kMemOrderNoBarrier: { | 300 | 2 | return base::subtle::NoBarrier_AtomicExchange(&value_, new_value); | 301 | 0 | } | 302 | 0 | case kMemOrderBarrier: { | 303 | 0 | FatalMemOrderNotSupported("Exchange"); | 304 | 0 | break; | 305 | 0 | } | 306 | 2 | case kMemOrderAcquire: { | 307 | 2 | return base::subtle::Acquire_AtomicExchange(&value_, new_value); | 308 | 0 | } | 309 | 2 | case kMemOrderRelease: { | 310 | 2 | return base::subtle::Release_AtomicExchange(&value_, new_value); | 311 | 0 | } | 312 | 6 | } | 313 | 0 | abort(); | 314 | 6 | } |
yb::AtomicInt<long long>::Exchange(long long, yb::MemoryOrder) Line | Count | Source | 297 | 5 | inline T AtomicInt<T>::Exchange(T new_value, MemoryOrder mem_order) { | 298 | 5 | switch (mem_order) { | 299 | 3 | case kMemOrderNoBarrier: { | 300 | 3 | return base::subtle::NoBarrier_AtomicExchange(&value_, new_value); | 301 | 0 | } | 302 | 0 | case kMemOrderBarrier: { | 303 | 0 | FatalMemOrderNotSupported("Exchange"); | 304 | 0 | break; | 305 | 0 | } | 306 | 1 | case kMemOrderAcquire: { | 307 | 1 | return base::subtle::Acquire_AtomicExchange(&value_, new_value); | 308 | 0 | } | 309 | 1 | case kMemOrderRelease: { | 310 | 1 | return base::subtle::Release_AtomicExchange(&value_, new_value); | 311 | 0 | } | 312 | 5 | } | 313 | 0 | abort(); | 314 | 5 | } |
yb::AtomicInt<unsigned int>::Exchange(unsigned int, yb::MemoryOrder) Line | Count | Source | 297 | 3 | inline T AtomicInt<T>::Exchange(T new_value, MemoryOrder mem_order) { | 298 | 3 | switch (mem_order) { | 299 | 1 | case kMemOrderNoBarrier: { | 300 | 1 | return base::subtle::NoBarrier_AtomicExchange(&value_, new_value); | 301 | 0 | } | 302 | 0 | case kMemOrderBarrier: { | 303 | 0 | FatalMemOrderNotSupported("Exchange"); | 304 | 0 | break; | 305 | 0 | } | 306 | 1 | case kMemOrderAcquire: { | 307 | 1 | return base::subtle::Acquire_AtomicExchange(&value_, new_value); | 308 | 0 | } | 309 | 1 | case kMemOrderRelease: { | 310 | 1 | return base::subtle::Release_AtomicExchange(&value_, new_value); | 311 | 0 | } | 312 | 3 | } | 313 | 0 | abort(); | 314 | 3 | } |
yb::AtomicInt<unsigned long long>::Exchange(unsigned long long, yb::MemoryOrder) Line | Count | Source | 297 | 3 | inline T AtomicInt<T>::Exchange(T new_value, MemoryOrder mem_order) { | 298 | 3 | switch (mem_order) { | 299 | 1 | case kMemOrderNoBarrier: { | 300 | 1 | return base::subtle::NoBarrier_AtomicExchange(&value_, new_value); | 301 | 0 | } | 302 | 0 | case kMemOrderBarrier: { | 303 | 0 | FatalMemOrderNotSupported("Exchange"); | 304 | 0 | break; | 305 | 0 | } | 306 | 1 | case kMemOrderAcquire: { | 307 | 1 | return base::subtle::Acquire_AtomicExchange(&value_, new_value); | 308 | 0 | } | 309 | 1 | case kMemOrderRelease: { | 310 | 1 | return base::subtle::Release_AtomicExchange(&value_, new_value); | 311 | 0 | } | 312 | 3 | } | 313 | 0 | abort(); | 314 | 3 | } |
|
315 | | |
316 | | template<typename T> |
317 | 27.6M | inline void AtomicInt<T>::StoreMax(T new_value, MemoryOrder mem_order) { |
318 | 27.6M | T old_value = Load(mem_order); |
319 | 27.6M | while (true27.6M ) { |
320 | 27.6M | T max_value = std::max(old_value, new_value); |
321 | 27.6M | T prev_value = CompareAndSwap(old_value, max_value, mem_order); |
322 | 27.6M | if (PREDICT_TRUE(old_value == prev_value)) { |
323 | 27.6M | break; |
324 | 27.6M | } |
325 | 14.8k | old_value = prev_value; |
326 | 14.8k | } |
327 | 27.6M | } yb::AtomicInt<unsigned long long>::StoreMax(unsigned long long, yb::MemoryOrder) Line | Count | Source | 317 | 2.45M | inline void AtomicInt<T>::StoreMax(T new_value, MemoryOrder mem_order) { | 318 | 2.45M | T old_value = Load(mem_order); | 319 | 2.45M | while (true2.45M ) { | 320 | 2.45M | T max_value = std::max(old_value, new_value); | 321 | 2.45M | T prev_value = CompareAndSwap(old_value, max_value, mem_order); | 322 | 2.45M | if (PREDICT_TRUE(old_value == prev_value)) { | 323 | 2.45M | break; | 324 | 2.45M | } | 325 | 2.39k | old_value = prev_value; | 326 | 2.39k | } | 327 | 2.45M | } |
yb::AtomicInt<int>::StoreMax(int, yb::MemoryOrder) Line | Count | Source | 317 | 9 | inline void AtomicInt<T>::StoreMax(T new_value, MemoryOrder mem_order) { | 318 | 9 | T old_value = Load(mem_order); | 319 | 9 | while (true) { | 320 | 9 | T max_value = std::max(old_value, new_value); | 321 | 9 | T prev_value = CompareAndSwap(old_value, max_value, mem_order); | 322 | 9 | if (PREDICT_TRUE(old_value == prev_value)) { | 323 | 9 | break; | 324 | 9 | } | 325 | 0 | old_value = prev_value; | 326 | 0 | } | 327 | 9 | } |
yb::AtomicInt<long long>::StoreMax(long long, yb::MemoryOrder) Line | Count | Source | 317 | 25.1M | inline void AtomicInt<T>::StoreMax(T new_value, MemoryOrder mem_order) { | 318 | 25.1M | T old_value = Load(mem_order); | 319 | 25.1M | while (true25.1M ) { | 320 | 25.1M | T max_value = std::max(old_value, new_value); | 321 | 25.1M | T prev_value = CompareAndSwap(old_value, max_value, mem_order); | 322 | 25.1M | if (PREDICT_TRUE(old_value == prev_value)) { | 323 | 25.1M | break; | 324 | 25.1M | } | 325 | 12.4k | old_value = prev_value; | 326 | 12.4k | } | 327 | 25.1M | } |
yb::AtomicInt<unsigned int>::StoreMax(unsigned int, yb::MemoryOrder) Line | Count | Source | 317 | 9 | inline void AtomicInt<T>::StoreMax(T new_value, MemoryOrder mem_order) { | 318 | 9 | T old_value = Load(mem_order); | 319 | 9 | while (true) { | 320 | 9 | T max_value = std::max(old_value, new_value); | 321 | 9 | T prev_value = CompareAndSwap(old_value, max_value, mem_order); | 322 | 9 | if (PREDICT_TRUE(old_value == prev_value)) { | 323 | 9 | break; | 324 | 9 | } | 325 | 0 | old_value = prev_value; | 326 | 0 | } | 327 | 9 | } |
|
328 | | |
329 | | template<typename T> |
330 | 36 | inline void AtomicInt<T>::StoreMin(T new_value, MemoryOrder mem_order) { |
331 | 36 | T old_value = Load(mem_order); |
332 | 36 | while (true) { |
333 | 36 | T min_value = std::min(old_value, new_value); |
334 | 36 | T prev_value = CompareAndSwap(old_value, min_value, mem_order); |
335 | 36 | if (PREDICT_TRUE(old_value == prev_value)) { |
336 | 36 | break; |
337 | 36 | } |
338 | 0 | old_value = prev_value; |
339 | 0 | } |
340 | 36 | } yb::AtomicInt<int>::StoreMin(int, yb::MemoryOrder) Line | Count | Source | 330 | 9 | inline void AtomicInt<T>::StoreMin(T new_value, MemoryOrder mem_order) { | 331 | 9 | T old_value = Load(mem_order); | 332 | 9 | while (true) { | 333 | 9 | T min_value = std::min(old_value, new_value); | 334 | 9 | T prev_value = CompareAndSwap(old_value, min_value, mem_order); | 335 | 9 | if (PREDICT_TRUE(old_value == prev_value)) { | 336 | 9 | break; | 337 | 9 | } | 338 | 0 | old_value = prev_value; | 339 | 0 | } | 340 | 9 | } |
yb::AtomicInt<long long>::StoreMin(long long, yb::MemoryOrder) Line | Count | Source | 330 | 9 | inline void AtomicInt<T>::StoreMin(T new_value, MemoryOrder mem_order) { | 331 | 9 | T old_value = Load(mem_order); | 332 | 9 | while (true) { | 333 | 9 | T min_value = std::min(old_value, new_value); | 334 | 9 | T prev_value = CompareAndSwap(old_value, min_value, mem_order); | 335 | 9 | if (PREDICT_TRUE(old_value == prev_value)) { | 336 | 9 | break; | 337 | 9 | } | 338 | 0 | old_value = prev_value; | 339 | 0 | } | 340 | 9 | } |
yb::AtomicInt<unsigned int>::StoreMin(unsigned int, yb::MemoryOrder) Line | Count | Source | 330 | 9 | inline void AtomicInt<T>::StoreMin(T new_value, MemoryOrder mem_order) { | 331 | 9 | T old_value = Load(mem_order); | 332 | 9 | while (true) { | 333 | 9 | T min_value = std::min(old_value, new_value); | 334 | 9 | T prev_value = CompareAndSwap(old_value, min_value, mem_order); | 335 | 9 | if (PREDICT_TRUE(old_value == prev_value)) { | 336 | 9 | break; | 337 | 9 | } | 338 | 0 | old_value = prev_value; | 339 | 0 | } | 340 | 9 | } |
yb::AtomicInt<unsigned long long>::StoreMin(unsigned long long, yb::MemoryOrder) Line | Count | Source | 330 | 9 | inline void AtomicInt<T>::StoreMin(T new_value, MemoryOrder mem_order) { | 331 | 9 | T old_value = Load(mem_order); | 332 | 9 | while (true) { | 333 | 9 | T min_value = std::min(old_value, new_value); | 334 | 9 | T prev_value = CompareAndSwap(old_value, min_value, mem_order); | 335 | 9 | if (PREDICT_TRUE(old_value == prev_value)) { | 336 | 9 | break; | 337 | 9 | } | 338 | 0 | old_value = prev_value; | 339 | 0 | } | 340 | 9 | } |
|
341 | | |
// A unique-ownership smart pointer whose internal pointer is read and
// written atomically, so get()/reset()/release() may be called concurrently
// from multiple threads. Only the pointer slot is synchronized; the
// pointed-to object itself gets no protection from this class.
template<typename T>
class AtomicUniquePtr {
 public:
  AtomicUniquePtr() {}
  AtomicUniquePtr(const AtomicUniquePtr<T>&) = delete;
  void operator=(const AtomicUniquePtr&) = delete;

  // Takes ownership of ptr.
  explicit AtomicUniquePtr(T* ptr) : ptr_(ptr) {}

  // Move operations are noexcept so containers and std::swap-based code can
  // move instead of copy (and the copy operations are deleted anyway).
  AtomicUniquePtr(AtomicUniquePtr<T>&& other) noexcept : ptr_(other.release()) {}

  void operator=(AtomicUniquePtr<T>&& other) noexcept {
    reset(other.release());
  }

  ~AtomicUniquePtr() {
    delete get();
  }

  // Returns the owned pointer (possibly nullptr) without giving up ownership.
  T* get(std::memory_order memory_order = std::memory_order_acquire) const {
    return ptr_.load(memory_order);
  }

  // Atomically replaces the owned pointer with ptr (taking ownership of it)
  // and deletes the previously owned object, if any.
  void reset(T* ptr = nullptr, std::memory_order memory_order = std::memory_order_acq_rel) {
    delete ptr_.exchange(ptr, memory_order);
  }

  // Atomically relinquishes ownership: returns the previously owned pointer
  // (possibly nullptr) and leaves this holding nullptr.
  T* release(std::memory_order memory_order = std::memory_order_acq_rel) {
    return ptr_.exchange(nullptr, memory_order);
  }

 private:
  std::atomic<T*> ptr_ = { nullptr };
};
376 | | |
377 | | template<class T, class... Args> |
378 | 18 | AtomicUniquePtr<T> MakeAtomicUniquePtr(Args&&... args) { |
379 | 18 | return AtomicUniquePtr<T>(new T(std::forward<Args>(args)...)); |
380 | 18 | } |
381 | | |
382 | | template <class T> |
383 | 6.27G | T GetAtomicFlag(T* flag) { |
384 | 6.27G | std::atomic<T>& atomic_flag = *pointer_cast<std::atomic<T>*>(flag); |
385 | 6.27G | return atomic_flag.load(std::memory_order::memory_order_relaxed); |
386 | 6.27G | } bool yb::GetAtomicFlag<bool>(bool*) Line | Count | Source | 383 | 4.43G | T GetAtomicFlag(T* flag) { | 384 | 4.43G | std::atomic<T>& atomic_flag = *pointer_cast<std::atomic<T>*>(flag); | 385 | 4.43G | return atomic_flag.load(std::memory_order::memory_order_relaxed); | 386 | 4.43G | } |
int yb::GetAtomicFlag<int>(int*) Line | Count | Source | 383 | 1.06G | T GetAtomicFlag(T* flag) { | 384 | 1.06G | std::atomic<T>& atomic_flag = *pointer_cast<std::atomic<T>*>(flag); | 385 | 1.06G | return atomic_flag.load(std::memory_order::memory_order_relaxed); | 386 | 1.06G | } |
double yb::GetAtomicFlag<double>(double*) Line | Count | Source | 383 | 3.85M | T GetAtomicFlag(T* flag) { | 384 | 3.85M | std::atomic<T>& atomic_flag = *pointer_cast<std::atomic<T>*>(flag); | 385 | 3.85M | return atomic_flag.load(std::memory_order::memory_order_relaxed); | 386 | 3.85M | } |
long long yb::GetAtomicFlag<long long>(long long*) Line | Count | Source | 383 | 345M | T GetAtomicFlag(T* flag) { | 384 | 345M | std::atomic<T>& atomic_flag = *pointer_cast<std::atomic<T>*>(flag); | 385 | 345M | return atomic_flag.load(std::memory_order::memory_order_relaxed); | 386 | 345M | } |
unsigned long long yb::GetAtomicFlag<unsigned long long>(unsigned long long*) Line | Count | Source | 383 | 422M | T GetAtomicFlag(T* flag) { | 384 | 422M | std::atomic<T>& atomic_flag = *pointer_cast<std::atomic<T>*>(flag); | 385 | 422M | return atomic_flag.load(std::memory_order::memory_order_relaxed); | 386 | 422M | } |
|
387 | | |
388 | | template <class U, class T> |
389 | 22.0k | void SetAtomicFlag(U value, T* flag) { |
390 | 22.0k | std::atomic<T>& atomic_flag = *pointer_cast<std::atomic<T>*>(flag); |
391 | 22.0k | atomic_flag.store(value); |
392 | 22.0k | } void yb::SetAtomicFlag<int, int>(int, int*) Line | Count | Source | 389 | 21.5k | void SetAtomicFlag(U value, T* flag) { | 390 | 21.5k | std::atomic<T>& atomic_flag = *pointer_cast<std::atomic<T>*>(flag); | 391 | 21.5k | atomic_flag.store(value); | 392 | 21.5k | } |
void yb::SetAtomicFlag<bool, bool>(bool, bool*) Line | Count | Source | 389 | 501 | void SetAtomicFlag(U value, T* flag) { | 390 | 501 | std::atomic<T>& atomic_flag = *pointer_cast<std::atomic<T>*>(flag); | 391 | 501 | atomic_flag.store(value); | 392 | 501 | } |
void yb::SetAtomicFlag<double, double>(double, double*) Line | Count | Source | 389 | 4 | void SetAtomicFlag(U value, T* flag) { | 390 | 4 | std::atomic<T>& atomic_flag = *pointer_cast<std::atomic<T>*>(flag); | 391 | 4 | atomic_flag.store(value); | 392 | 4 | } |
|
393 | | |
// Sleeps for the number of milliseconds currently stored in *flag.
// A zero flag value (the common case) returns immediately.
template <class T>
void AtomicFlagSleepMs(T* flag) {
  const auto sleep_ms = GetAtomicFlag(flag);
  if (sleep_ms == 0) {
    return;
  }
  std::this_thread::sleep_for(std::chrono::milliseconds(sleep_ms));
}
|
401 | | |
402 | | template <class T> |
403 | 11 | void AtomicFlagRandomSleepMs(T* flag) { |
404 | 11 | auto value = GetAtomicFlag(flag); |
405 | 11 | if (value != 0) { |
406 | 0 | std::this_thread::sleep_for(std::chrono::milliseconds(RandomUniformInt<T>(0, value))); |
407 | 0 | } |
408 | 11 | } void yb::AtomicFlagRandomSleepMs<int>(int*) Line | Count | Source | 403 | 11 | void AtomicFlagRandomSleepMs(T* flag) { | 404 | 11 | auto value = GetAtomicFlag(flag); | 405 | 11 | if (value != 0) { | 406 | 0 | std::this_thread::sleep_for(std::chrono::milliseconds(RandomUniformInt<T>(0, value))); | 407 | 0 | } | 408 | 11 | } |
Unexecuted instantiation: void yb::AtomicFlagRandomSleepMs<long long>(long long*) |
409 | | |
410 | | template <class U, class T> |
411 | 3 | bool CompareAndSetFlag(T* flag, U exp, U desired) { |
412 | 3 | std::atomic<T>& atomic_flag = *pointer_cast<std::atomic<T>*>(flag); |
413 | 3 | return atomic_flag.compare_exchange_strong(exp, desired); |
414 | 3 | } |
415 | | |
// Atomically raises *max_holder to new_value if new_value is greater;
// otherwise leaves it untouched.
template<typename T>
void UpdateAtomicMax(std::atomic<T>* max_holder, T new_value) {
  T observed = max_holder->load(std::memory_order_acquire);
  for (;;) {
    if (!(new_value > observed)) {
      return;  // Current maximum already covers new_value.
    }
    if (max_holder->compare_exchange_weak(observed, new_value)) {
      return;  // new_value installed as the maximum.
    }
    // compare_exchange_weak refreshed `observed` on failure; re-check.
  }
}
|
421 | | |
422 | | class AtomicTryMutex { |
423 | | public: |
424 | 25.6M | void unlock() { |
425 | 25.6M | auto value = locked_.exchange(false, std::memory_order_acq_rel); |
426 | 25.6M | DCHECK(value); |
427 | 25.6M | } |
428 | | |
429 | 31.8M | bool try_lock() { |
430 | 31.8M | bool expected = false; |
431 | 31.8M | return locked_.compare_exchange_strong(expected, true, std::memory_order_acq_rel); |
432 | 31.8M | } |
433 | | |
434 | 55.0M | bool is_locked() const { |
435 | 55.0M | return locked_.load(std::memory_order_acquire); |
436 | 55.0M | } |
437 | | |
438 | | private: |
439 | | std::atomic<bool> locked_{false}; |
440 | | }; |
441 | | |
// Atomically adds delta to *atomic and returns the post-add value
// (like fetch_add, but returning the new value rather than the old one).
template <class T, class D>
T AddFetch(std::atomic<T>* atomic, const D& delta, std::memory_order memory_order) {
  const T previous = atomic->fetch_add(delta, memory_order);
  return previous + delta;
}
446 | | |
447 | | // ------------------------------------------------------------------------------------------------ |
448 | | // A utility for testing if an atomic is lock-free. |
449 | | |
namespace atomic_internal {

// Returns true if the atomic's implementation is acceptable, i.e. lock-free.
// On aarch64 every atomic is accepted unconditionally for now.
template <class T>
bool IsAcceptableAtomicImpl(const T& atomic_variable) {
#if defined(__aarch64__)
  // TODO: ensure we are using proper 16-byte atomics on aarch64.
  // https://github.com/yugabyte/yugabyte-db/issues/9196
  return true;
#else
  return atomic_variable.is_lock_free();
#endif
}

}  // namespace atomic_internal
464 | | |
465 | | template <class T> |
466 | 31.6M | bool IsAcceptableAtomicImpl(const boost::atomics::atomic<T>& atomic_variable) { |
467 | 31.6M | return atomic_internal::IsAcceptableAtomicImpl(atomic_variable); |
468 | 31.6M | } bool yb::IsAcceptableAtomicImpl<yb::OpId>(boost::atomics::atomic<yb::OpId> const&) Line | Count | Source | 466 | 31.4M | bool IsAcceptableAtomicImpl(const boost::atomics::atomic<T>& atomic_variable) { | 467 | 31.4M | return atomic_internal::IsAcceptableAtomicImpl(atomic_variable); | 468 | 31.4M | } |
bool yb::IsAcceptableAtomicImpl<yb::consensus::ReplicaState::LeaderStateCache>(boost::atomics::atomic<yb::consensus::ReplicaState::LeaderStateCache> const&) Line | Count | Source | 466 | 150k | bool IsAcceptableAtomicImpl(const boost::atomics::atomic<T>& atomic_variable) { | 467 | 150k | return atomic_internal::IsAcceptableAtomicImpl(atomic_variable); | 468 | 150k | } |
stack_trace.cc:bool yb::IsAcceptableAtomicImpl<yb::LockFreeStack<yb::(anonymous namespace)::ThreadStackEntry>::Head>(boost::atomics::atomic<yb::LockFreeStack<yb::(anonymous namespace)::ThreadStackEntry>::Head> const&) Line | Count | Source | 466 | 65.4k | bool IsAcceptableAtomicImpl(const boost::atomics::atomic<T>& atomic_variable) { | 467 | 65.4k | return atomic_internal::IsAcceptableAtomicImpl(atomic_variable); | 468 | 65.4k | } |
|
469 | | |
470 | | template <class T> |
471 | 17.3k | bool IsAcceptableAtomicImpl(const std::atomic<T>& atomic_variable) { |
472 | 17.3k | return atomic_internal::IsAcceptableAtomicImpl(atomic_variable); |
473 | 17.3k | } |
474 | | |
475 | | } // namespace yb |
476 | | #endif /* YB_UTIL_ATOMIC_H */ |