/Users/deen/code/yugabyte-db/src/yb/gutil/atomicops.h
// Copyright 2003 Google Inc.
// All Rights Reserved.
//
// The following only applies to changes made to this file as part of YugaByte development.
//
// Portions Copyright (c) YugaByte, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations
// under the License.
//

// For atomic operations on statistics counters, see atomic_stats_counter.h.
// For atomic operations on sequence numbers, see atomic_sequence_num.h.
// For atomic operations on reference counts, see atomic_refcount.h.

// Some fast atomic operations -- typically with machine-dependent
// implementations. This file may need editing as Google code is
// ported to different architectures.

// The routines exported by this module are subtle. If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain. If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative. You should assume only properties explicitly guaranteed by the
// specifications in this file. You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break. If you
// do not know what you are doing, avoid these routines, and use a Mutex.
//
// The following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables. They combine CompareAndSwap(),
// addition, exchange, a load, or a store with appropriate memory-ordering
// instructions. "Acquire" operations ensure that no later memory access by
// the same thread can be reordered ahead of the operation. "Release"
// operations ensure that no previous memory access by the same thread can be
// reordered after the operation. "Barrier" operations have both "Acquire" and
// "Release" semantics. A MemoryBarrier() has "Barrier" semantics, but does no
// memory access. "NoBarrier" operations have no barrier: the CPU is
// permitted to reorder them freely (as seen by other threads), even in ways
// that appear to violate functional dependence, just as it can for any normal
// variable access.
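//
// For example, a release-store paired with an acquire-load can be used to
// publish data between threads (a minimal sketch; "g_data", "g_ready" and
// UseValue() are hypothetical, not part of this interface):
//
//   // Producer thread:
//   g_data = 42;                                 // plain write
//   base::subtle::Release_Store(&g_ready, 1);    // publish: g_data is visible first
//
//   // Consumer thread:
//   if (base::subtle::Acquire_Load(&g_ready)) {  // if the flag is observed...
//     UseValue(g_data);                          // ...g_data is safe to read
//   }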
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The NoBarrier
// versions are provided when no barriers are needed:
//   NoBarrier_Store()
//   NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these. Moreover, if you choose to use the base::subtle::Atomic64 type,
// you MUST use one of the Load or Store routines to get correct behavior
// on 32-bit platforms.
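//
// For instance (a sketch; "counter" is a hypothetical variable):
//
//   base::subtle::Atomic64 counter;
//   base::subtle::NoBarrier_Store(&counter, 1);          // correct
//   int64_t v = base::subtle::NoBarrier_Load(&counter);  // correct
//   counter = 1;          // WRONG: may tear into two 32-bit writes
//   int64_t w = counter;  // WRONG: same tearing problem on a 32-bit platform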
//
// The intent is eventually to put all of these routines in namespace
// base::subtle

#ifndef YB_GUTIL_ATOMICOPS_H
#define YB_GUTIL_ATOMICOPS_H

#include <stdint.h>

// ------------------------------------------------------------------------
// Include the platform specific implementations of the types
// and operations listed below. Implementations are to provide Atomic32
// and Atomic64 operations. If there is a mismatch between intptr_t and
// the Atomic32 or Atomic64 types for a platform, the platform-specific header
// should define the macro, AtomicWordCastType, in a clause similar to the
// following:
// #if ...pointers are 64 bits...
// # define AtomicWordCastType base::subtle::Atomic64
// #else
// # define AtomicWordCastType Atomic32
// #endif
// ------------------------------------------------------------------------

#include "yb/gutil/arm_instruction_set_select.h"

// ThreadSanitizer provides its own implementation of atomicops.
#if defined(THREAD_SANITIZER)
#include "yb/gutil/atomicops-internals-tsan.h"
#elif defined(__APPLE__)
#include "yb/gutil/atomicops-internals-macosx.h"
#elif defined(__GNUC__) && defined(ARMV6)
#include "yb/gutil/atomicops-internals-arm-v6plus.h"
#elif defined(ARMV3)
#include "yb/gutil/atomicops-internals-arm-generic.h"
#elif defined(__GNUC__) && (defined(__i386) || defined(__x86_64__))
#include "yb/gutil/atomicops-internals-x86.h"
#elif defined(__GNUC__) && defined(ARCH_POWERPC64)
#include "yb/gutil/atomicops-internals-powerpc.h"
#elif defined(OS_WINDOWS)
#include "yb/gutil/atomicops-internals-windows.h"
#elif defined(__GNUC__) && defined(__aarch64__)
#include "yb/gutil/atomicops-internals-arm64.h"
#else
#error You need to implement atomic operations for this architecture
#endif

// Signed type that can hold a pointer and supports the atomic ops below, as
// well as atomic loads and stores. Instances must be naturally aligned.
typedef intptr_t AtomicWord;

#ifdef AtomicWordCastType
// ------------------------------------------------------------------------
// This section is needed only when explicit type casting is required to
// cast AtomicWord to one of the basic atomic types (Atomic64 or Atomic32).
// It also serves to document the AtomicWord interface.
// ------------------------------------------------------------------------

namespace base {
namespace subtle {

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always returns the old value of "*ptr".
//
// This routine implies no memory barriers.
inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
                                           AtomicWord old_value,
                                           AtomicWord new_value) {
  return NoBarrier_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}
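
// As an illustration, compare-and-swap is typically used in a retry loop (a
// sketch; UpdateMax is a hypothetical helper, not part of this interface):
//
//   // Atomically raise *ptr to at least "limit".
//   void UpdateMax(volatile AtomicWord* ptr, AtomicWord limit) {
//     AtomicWord old_value = NoBarrier_Load(ptr);
//     while (old_value < limit) {
//       AtomicWord prev = NoBarrier_CompareAndSwap(ptr, old_value, limit);
//       if (prev == old_value) break;  // CAS succeeded; maximum updated
//       old_value = prev;              // lost a race; retry with the new value
//     }
//   }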

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
                                           AtomicWord new_value) {
  return NoBarrier_AtomicExchange(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}

inline AtomicWord Acquire_AtomicExchange(volatile AtomicWord* ptr,
                                         AtomicWord new_value) {
  return Acquire_AtomicExchange(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}

inline AtomicWord Release_AtomicExchange(volatile AtomicWord* ptr,
                                         AtomicWord new_value) {
  return Release_AtomicExchange(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}
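
// Together with PauseCPU(), the exchange routines above are the building
// blocks for the spinlocks mentioned at the top of this file (a sketch;
// "lock" is a hypothetical AtomicWord that is 0 when unlocked):
//
//   void SpinLockAcquire(volatile AtomicWord* lock) {
//     while (Acquire_AtomicExchange(lock, 1) != 0) {
//       PauseCPU();  // reduce contention with the other hyperthread while spinning
//     }
//   }
//   void SpinLockRelease(volatile AtomicWord* lock) {
//     Release_Store(lock, 0);  // critical-section writes become visible first
//   }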

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory
// barriers.
inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
                                            AtomicWord increment) {
  return NoBarrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
                                          AtomicWord increment) {
  return Barrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}
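
// A typical use of these is reference counting (a sketch; "refcount" and
// DeleteObject are hypothetical, not part of this interface):
//
//   NoBarrier_AtomicIncrement(&refcount, 1);   // taking a ref needs no barrier
//   if (Barrier_AtomicIncrement(&refcount, -1) == 0) {
//     DeleteObject();  // barrier: earlier writes complete before destruction
//   }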

inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Acquire_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Release_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
  NoBarrier_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return base::subtle::Acquire_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return base::subtle::Release_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
  return NoBarrier_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Acquire_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Release_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

}  // namespace subtle
}  // namespace base
#endif  // AtomicWordCastType

// ------------------------------------------------------------------------
// Commented out type definitions and method declarations for documentation
// of the interface provided by this module.
// ------------------------------------------------------------------------

#if 0

// Signed 32-bit type that supports the atomic ops below, as well as atomic
// loads and stores. Instances must be naturally aligned. This type differs
// from AtomicWord in 64-bit binaries where AtomicWord is 64-bits.
typedef int32_t Atomic32;

// Corresponding operations on Atomic32
namespace base {
namespace subtle {

// Signed 64-bit type that supports the atomic ops below, as well as atomic
// loads and stores. Instances must be naturally aligned. This type differs
// from AtomicWord in 32-bit binaries where AtomicWord is 32-bits.
typedef int64_t Atomic64;

Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);

// Corresponding operations on Atomic64
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);

Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
}  // namespace subtle
}  // namespace base

void MemoryBarrier();

void PauseCPU();

#endif  // 0


// ------------------------------------------------------------------------
// The following are to be deprecated when all uses have been changed to
// use the base::subtle namespace.
// ------------------------------------------------------------------------

#ifdef AtomicWordCastType
// AtomicWord versions to be deprecated
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
}

inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return base::subtle::Acquire_Store(ptr, value);
}

inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return base::subtle::Release_Store(ptr, value);
}

inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Acquire_Load(ptr);
}

inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Release_Load(ptr);
}
#endif  // AtomicWordCastType

// 32-bit Acquire/Release operations to be deprecated.

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  base::subtle::Acquire_Store(ptr, value);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  return base::subtle::Release_Store(ptr, value);
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return base::subtle::Acquire_Load(ptr);
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  return base::subtle::Release_Load(ptr);
}

// 64-bit Acquire/Release operations to be deprecated.

inline base::subtle::Atomic64 Acquire_CompareAndSwap(
    volatile base::subtle::Atomic64* ptr,
    base::subtle::Atomic64 old_value, base::subtle::Atomic64 new_value) {
  return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline base::subtle::Atomic64 Release_CompareAndSwap(
    volatile base::subtle::Atomic64* ptr,
    base::subtle::Atomic64 old_value, base::subtle::Atomic64 new_value) {
  return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
}
inline void Acquire_Store(
    volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) {
  base::subtle::Acquire_Store(ptr, value);
}
inline void Release_Store(
    volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) {
  return base::subtle::Release_Store(ptr, value);
}
inline base::subtle::Atomic64 Acquire_Load(
    volatile const base::subtle::Atomic64* ptr) {
  return base::subtle::Acquire_Load(ptr);
}
inline base::subtle::Atomic64 Release_Load(
    volatile const base::subtle::Atomic64* ptr) {
  return base::subtle::Release_Load(ptr);
}

#endif  // YB_GUTIL_ATOMICOPS_H