YugabyteDB (2.13.1.0-b60, 21121d69985fbf76aa6958d8f04a9bfa936293b5)

Coverage Report

Created: 2022-03-22 16:43

/Users/deen/code/yugabyte-db/src/yb/gutil/atomicops-internals-macosx.h
// Copyright 2006 Google Inc.
// All Rights Reserved.
//
// The following only applies to changes made to this file as part of YugaByte development.
//
// Portions Copyright (c) YugaByte, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License.  You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied.  See the License for the specific language governing permissions and limitations
// under the License.
//
//
// Implementation of atomic operations for Mac OS X.  This file should not
// be included directly.  Clients should instead include
// "base/atomicops.h".

#ifndef YB_GUTIL_ATOMICOPS_INTERNALS_MACOSX_H
#define YB_GUTIL_ATOMICOPS_INTERNALS_MACOSX_H

typedef int32_t Atomic32;
typedef int64_t Atomic64;

// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always
// different types on the Mac, even when they are the same size.  Similarly,
// on __ppc64__, AtomicWord and Atomic64 are always different.  Thus, we need
// explicit casting.
#ifdef __LP64__
#define AtomicWordCastType base::subtle::Atomic64
#else
#define AtomicWordCastType Atomic32
#endif

#if defined(__LP64__) || defined(__i386__)
#define BASE_HAS_ATOMIC64 1  // Use only in tests and base/atomic*
#endif

#include <libkern/OSAtomic.h>

#if !defined(__LP64__) && defined(__ppc__)

// The Mac 64-bit OSAtomic implementations are not available for 32-bit PowerPC,
// while the underlying assembly instructions are available on only some
// implementations of PowerPC.

// The following inline functions will fail with an error message at compile
// time ONLY IF they are called.  So it is safe to use this header if user
// code only calls AtomicWord and Atomic32 operations.
//
// NOTE(user): Implementation notes for the atomic ops below may
// be found in "PowerPC Virtual Environment Architecture, Book II,
// Version 2.02", January 28, 2005, Appendix B, page 46.  Unfortunately,
// extra care must be taken to ensure data are properly 8-byte aligned, and
// that data are returned correctly according to Mac OS X ABI specs.

inline int64_t OSAtomicCompareAndSwap64(
    int64_t oldValue, int64_t newValue, int64_t *theValue) {
  __asm__ __volatile__(
      "_OSAtomicCompareAndSwap64_not_supported_for_32_bit_ppc\n\t");
  return 0;
}

inline int64_t OSAtomicAdd64(int64_t theAmount, int64_t *theValue) {
  __asm__ __volatile__(
      "_OSAtomicAdd64_not_supported_for_32_bit_ppc\n\t");
  return 0;
}

inline int64_t OSAtomicCompareAndSwap64Barrier(
    int64_t oldValue, int64_t newValue, int64_t *theValue) {
  int64_t prev = OSAtomicCompareAndSwap64(oldValue, newValue, theValue);
  OSMemoryBarrier();
  return prev;
}

inline int64_t OSAtomicAdd64Barrier(
    int64_t theAmount, int64_t *theValue) {
  int64_t new_val = OSAtomicAdd64(theAmount, theValue);
  OSMemoryBarrier();
  return new_val;
}
#endif


namespace base {
namespace subtle {

typedef int32_t Atomic32;
typedef int64_t Atomic64;

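// Full memory barrier, implemented with the libkern OSMemoryBarrier()
// primitive.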
inline void MemoryBarrier() {
  OSMemoryBarrier();
}

// 32-bit Versions.

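// Atomically compare *ptr against old_value and, if they match, write
// new_value.  Returns old_value when the swap succeeds; otherwise returns the
// value re-read from *ptr (retrying as long as that value still equals
// old_value).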
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

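// Atomically store new_value into *ptr and return the value it replaced,
// with no memory barrier.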
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}

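// Atomic exchange that also issues a memory barrier, via the Barrier variant
// of the libkern compare-and-swap.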
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                            const_cast<Atomic32*>(ptr)));
  return old_value;
}

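// The libkern interface does not distinguish acquire from release barriers,
// so the release exchange simply reuses the acquire implementation.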
inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  return Acquire_AtomicExchange(ptr, new_value);
}

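// Atomically add increment to *ptr and return the new value; the Barrier
// variant below does the same with a full memory barrier.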
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
                                          Atomic32 increment) {
  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
                                        Atomic32 increment) {
  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}

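// Compare-and-swap with a memory barrier; same retry loop as the no-barrier
// version above.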
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

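// As with the 64-bit version further below, acquire and release are
// equivalent here, so this forwards to Acquire_CompareAndSwap.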
inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

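// Plain, acquire, and release stores and loads.  Acquire_Store issues the
// barrier after the store and Release_Store before it; Acquire_Load issues
// the barrier after the load and Release_Load before it.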
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
  MemoryBarrier();
  return *ptr;
}

// 64-bit Versions.

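// These mirror the 32-bit operations above, built on the 64-bit libkern
// primitives (or the 32-bit PowerPC stubs defined earlier in this file).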
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     const_cast<Atomic64*>(ptr)));
  return old_value;
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64Barrier(old_value, new_value,
                                            const_cast<Atomic64*>(ptr)));
  return old_value;
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return Acquire_AtomicExchange(ptr, new_value);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
                                          Atomic64 increment) {
  return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
                                        Atomic64 increment) {
  return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
                                        const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The libkern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

#ifdef __LP64__

// 64-bit implementation on 64-bit platform

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

// Issue the x86 "pause" (or arm64 "yield") instruction, which tells the CPU
// that we are in a spinlock wait loop and should allow other hyperthreads
// to run, not speculate memory access, etc.
inline void PauseCPU() {
#ifdef __x86_64__
  __asm__ __volatile__("pause" : : : "memory");
#elif defined(__arm64__)
  __asm__ __volatile__("yield" : : : "memory");
#else
  #error "PauseCPU is only supported for x86_64 and arm64 on macOS"
#endif
}

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  MemoryBarrier();
  return *ptr;
}

#else

// 64-bit implementation on 32-bit platform

#if defined(__ppc__)

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __asm__ __volatile__(
      "_NoBarrier_Store_not_supported_for_32_bit_ppc\n\t");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  __asm__ __volatile__(
      "_NoBarrier_Load_not_supported_for_32_bit_ppc\n\t");
  return 0;
}

#elif defined(__i386__)

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __asm__ __volatile__("movq %1, %%mm0\n\t"  // Use mmx reg for 64-bit atomic
                       "movq %%mm0, %0\n\t"  // moves (ptr could be read-only)
                       "emms\n\t"            // Reset FP registers
                       : "=m" (*ptr)
                       : "m" (value)
                       : // mark the FP stack and mmx registers as clobbered
                         "st", "st(1)", "st(2)", "st(3)", "st(4)",
                         "st(5)", "st(6)", "st(7)", "mm0", "mm1",
                         "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  Atomic64 value;
  __asm__ __volatile__("movq %1, %%mm0\n\t"  // Use mmx reg for 64-bit atomic
                       "movq %%mm0, %0\n\t"  // moves (ptr could be read-only)
                       "emms\n\t"            // Reset FP registers
                       : "=m" (value)
                       : "m" (*ptr)
                       : // mark the FP stack and mmx registers as clobbered
                         "st", "st(1)", "st(2)", "st(3)", "st(4)",
                         "st(5)", "st(6)", "st(7)", "mm0", "mm1",
                         "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
  return value;
}

#elif defined(__arm__)

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  int store_failed;
  Atomic64 dummy;
  __asm__ __volatile__(
      "1:\n"
      // Dummy load to lock cache line.
      "ldrexd  %1, [%3]\n"
      "strexd  %0, %2, [%3]\n"
      "teq     %0, #0\n"
      "bne     1b"
      : "=&r" (store_failed), "=&r"(dummy)
      : "r"(value), "r" (ptr)
      : "cc", "memory");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  Atomic64 res;
  __asm__ __volatile__(
      "ldrexd   %0, [%1]\n"
      "clrex\n"
      : "=r" (res)
      : "r"(ptr), "Q"(*ptr));
  return res;
}

#endif

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  NoBarrier_Store(ptr, value);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  MemoryBarrier();
  NoBarrier_Store(ptr, value);
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  Atomic64 value = NoBarrier_Load(ptr);
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  MemoryBarrier();
  return NoBarrier_Load(ptr);
}
#endif  // __LP64__

}   // namespace subtle
}   // namespace base

// NOTE(user): The following is also deprecated.  New callers should use
// the base::subtle namespace.
inline void MemoryBarrier() {
  base::subtle::MemoryBarrier();
}

#endif  // YB_GUTIL_ATOMICOPS_INTERNALS_MACOSX_H