YugabyteDB (2.13.0.0-b42, bfc6a6643e7399ac8a0e81d06a3ee6d6571b33ab)

Coverage Report

Created: 2022-03-09 17:30

/Users/deen/code/yugabyte-db/src/yb/gutil/atomicops-internals-macosx.h
Line
Count
Source
1
// Copyright 2006 Google Inc.
2
// All Rights Reserved.
3
//
4
// The following only applies to changes made to this file as part of YugaByte development.
5
//
6
// Portions Copyright (c) YugaByte, Inc.
7
//
8
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
9
// in compliance with the License.  You may obtain a copy of the License at
10
//
11
// http://www.apache.org/licenses/LICENSE-2.0
12
//
13
// Unless required by applicable law or agreed to in writing, software distributed under the License
14
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
15
// or implied.  See the License for the specific language governing permissions and limitations
16
// under the License.
17
//
18
//
19
// Implementation of atomic operations for Mac OS X.  This file should not
20
// be included directly.  Clients should instead include
21
// "base/atomicops.h".
22
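// Illustrative usage sketch (not part of the original header and not counted
// in this coverage report). Per the comment above, client code includes the
// umbrella atomicops header rather than this file; the include path shown is
// an assumption for this tree, and the calls use the base::subtle operations
// defined later in this file:
//
//   #include "yb/gutil/atomicops.h"
//
//   base::subtle::Atomic32 ref_count = 0;
//   base::subtle::NoBarrier_AtomicIncrement(&ref_count, 1);      // ref_count == 1
//   base::subtle::Atomic32 prev =
//       base::subtle::Acquire_CompareAndSwap(&ref_count, 1, 2);  // prev == 1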
23
#ifndef YB_GUTIL_ATOMICOPS_INTERNALS_MACOSX_H
24
#define YB_GUTIL_ATOMICOPS_INTERNALS_MACOSX_H
25
26
typedef int32_t Atomic32;
27
typedef int64_t Atomic64;
28
29
// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always different
30
// on the Mac, even when they are the same size.  Similarly, on __ppc64__,
31
// AtomicWord and Atomic64 are always different.  Thus, we need explicit
32
// casting.
33
#ifdef __LP64__
34
#define AtomicWordCastType base::subtle::Atomic64
35
#else
36
#define AtomicWordCastType Atomic32
37
#endif
38
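// Hedged sketch (not from the original file) of the explicit casting the
// comment above refers to. The real AtomicWord wrappers live in the header
// that includes this one, so the wrapper name below is illustrative only:
//
//   inline AtomicWord NoBarrier_AtomicWordLoad(volatile const AtomicWord* ptr) {
//     return base::subtle::NoBarrier_Load(
//         reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
//   }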
39
#if defined(__LP64__) || defined(__i386__)
40
#define BASE_HAS_ATOMIC64 1  // Use only in tests and base/atomic*
41
#endif
42
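// Illustrative guard (sketch only, not part of the original header): per the
// comment above, test code keys off this macro before exercising the 64-bit
// operations.
//
//   #if defined(BASE_HAS_ATOMIC64)
//     base::subtle::Atomic64 counter = 0;
//     base::subtle::NoBarrier_AtomicIncrement(&counter, 1);
//   #endif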
43
#include <libkern/OSAtomic.h>
44
45
#if !defined(__LP64__) && defined(__ppc__)
46
47
// The Mac 64-bit OSAtomic implementations are not available for 32-bit PowerPC,
48
// while the underlying assembly instructions are available on only some
49
// implementations of PowerPC.
50
51
// The following inline functions will fail with the error message at compile
52
// time ONLY IF they are called.  So it is safe to use this header if user
53
// code only calls AtomicWord and Atomic32 operations.
54
//
55
// NOTE(user): Notes on implementing the atomic ops below may
56
// be found in "PowerPC Virtual Environment Architecture, Book II,
57
// Version 2.02", January 28, 2005, Appendix B, page 46.  Unfortunately,
58
// extra care must be taken to ensure data are properly 8-byte aligned, and
59
// that data are returned correctly according to Mac OS X ABI specs.
60
61
inline int64_t OSAtomicCompareAndSwap64(
62
    int64_t oldValue, int64_t newValue, int64_t *theValue) {
63
  __asm__ __volatile__(
64
      "_OSAtomicCompareAndSwap64_not_supported_for_32_bit_ppc\n\t");
65
  return 0;
66
}
67
68
inline int64_t OSAtomicAdd64(int64_t theAmount, int64_t *theValue) {
69
  __asm__ __volatile__(
70
      "_OSAtomicAdd64_not_supported_for_32_bit_ppc\n\t");
71
  return 0;
72
}
73
74
inline int64_t OSAtomicCompareAndSwap64Barrier(
75
    int64_t oldValue, int64_t newValue, int64_t *theValue) {
76
  int64_t prev = OSAtomicCompareAndSwap64(oldValue, newValue, theValue);
77
  OSMemoryBarrier();
78
  return prev;
79
}
80
81
inline int64_t OSAtomicAdd64Barrier(
82
    int64_t theAmount, int64_t *theValue) {
83
  int64_t new_val = OSAtomicAdd64(theAmount, theValue);
84
  OSMemoryBarrier();
85
  return new_val;
86
}
87
#endif
88
89
90
namespace base {
91
namespace subtle {
92
93
typedef int32_t Atomic32;
94
typedef int64_t Atomic64;
95
96
681M
inline void MemoryBarrier() {
97
681M
  OSMemoryBarrier();
98
681M
}
99
100
// 32-bit Versions.
101
102
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
103
                                         Atomic32 old_value,
104
36.3M
                                         Atomic32 new_value) {
105
36.3M
  Atomic32 prev_value;
106
36.3M
  do {
107
36.3M
    if (OSAtomicCompareAndSwap32(old_value, new_value,
108
31.4M
                                 const_cast<Atomic32*>(ptr))) {
109
31.4M
      return old_value;
110
31.4M
    }
111
4.85M
    prev_value = *ptr;
112
4.85M
  } while (prev_value == old_value);
113
4.83M
  return prev_value;
114
36.3M
}
115
116
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
117
37.2k
                                         Atomic32 new_value) {
118
37.2k
  Atomic32 old_value;
119
37.2k
  do {
120
37.2k
    old_value = *ptr;
121
37.2k
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
122
37.2k
                                     const_cast<Atomic32*>(ptr)));
123
37.2k
  return old_value;
124
37.2k
}
125
126
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
127
1.32G
                                       Atomic32 new_value) {
128
1.32G
  Atomic32 old_value;
129
1.32G
  do {
130
1.32G
    old_value = *ptr;
131
1.32G
  } while (!OSAtomicCompareAndSwap32Barrier(old_value, new_value,
132
1.32G
                                            const_cast<Atomic32*>(ptr)));
133
1.32G
  return old_value;
134
1.32G
}
135
136
inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
137
1.32G
                                       Atomic32 new_value) {
138
1.32G
  return Acquire_AtomicExchange(ptr, new_value);
139
1.32G
}
140
141
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
142
6.73M
                                          Atomic32 increment) {
143
6.73M
  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
144
6.73M
}
145
146
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
147
11
                                          Atomic32 increment) {
148
11
  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
149
11
}
150
151
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
152
                                       Atomic32 old_value,
153
1.90G
                                       Atomic32 new_value) {
154
1.90G
  Atomic32 prev_value;
155
1.90G
  do {
156
1.90G
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
157
1.77G
                                        const_cast<Atomic32*>(ptr))) {
158
1.77G
      return old_value;
159
1.77G
    }
160
124M
    prev_value = *ptr;
161
124M
  } while (prev_value == old_value);
162
123M
  return prev_value;
163
1.90G
}
164
165
inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
166
                                       Atomic32 old_value,
167
220M
                                       Atomic32 new_value) {
168
220M
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
169
220M
}
170
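// Illustrative caller-side sketch (not part of the original file) showing how
// the CompareAndSwap return value above is typically consumed: retry until the
// observed value matches the value we swapped against. The helper name is
// made up for this example.
//
//   inline void UpdateMax(volatile Atomic32* max_val, Atomic32 candidate) {
//     Atomic32 cur = NoBarrier_Load(max_val);
//     while (candidate > cur) {
//       Atomic32 prev = Acquire_CompareAndSwap(max_val, cur, candidate);
//       if (prev == cur) break;   // we installed candidate
//       cur = prev;               // lost the race; re-check against new value
//     }
//   }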
171
7.90M
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
172
7.90M
  *ptr = value;
173
7.90M
}
174
175
7
inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
176
7
  *ptr = value;
177
7
  MemoryBarrier();
178
7
}
179
180
32.0M
inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
181
32.0M
  MemoryBarrier();
182
32.0M
  *ptr = value;
183
32.0M
}
184
185
10.4G
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
186
10.4G
  return *ptr;
187
10.4G
}
188
189
555M
inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
190
555M
  Atomic32 value = *ptr;
191
555M
  MemoryBarrier();
192
555M
  return value;
193
555M
}
194
195
41
inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
196
41
  MemoryBarrier();
197
41
  return *ptr;
198
41
}
199
200
// 64-bit Versions.
201
202
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
203
                                         Atomic64 old_value,
204
348M
                                         Atomic64 new_value) {
205
348M
  Atomic64 prev_value;
206
348M
  do {
207
348M
    if (OSAtomicCompareAndSwap64(old_value, new_value,
208
334M
                                 const_cast<Atomic64*>(ptr))) {
209
334M
      return old_value;
210
334M
    }
211
14.1M
    prev_value = *ptr;
212
14.1M
  } while (prev_value == old_value);
213
14.1M
  return prev_value;
214
348M
}
215
216
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
217
4
                                         Atomic64 new_value) {
218
4
  Atomic64 old_value;
219
4
  do {
220
4
    old_value = *ptr;
221
4
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
222
4
                                     const_cast<Atomic64*>(ptr)));
223
4
  return old_value;
224
4
}
225
226
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
227
30
                                       Atomic64 new_value) {
228
30
  Atomic64 old_value;
229
30
  do {
230
30
    old_value = *ptr;
231
30
  } while (!OSAtomicCompareAndSwap64Barrier(old_value, new_value,
232
30
                                            const_cast<Atomic64*>(ptr)));
233
30
  return old_value;
234
30
}
235
236
inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
237
2
                                       Atomic64 new_value) {
238
2
  return Acquire_AtomicExchange(ptr, new_value);
239
2
}
240
241
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
242
3.42G
                                          Atomic64 increment) {
243
3.42G
  return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
244
3.42G
}
245
246
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
247
115M
                                        Atomic64 increment) {
248
115M
  return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
249
115M
}
250
251
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
252
                                       Atomic64 old_value,
253
44
                                       Atomic64 new_value) {
254
44
  Atomic64 prev_value;
255
44
  do {
256
44
    if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
257
36
                                        const_cast<Atomic64*>(ptr))) {
258
36
      return old_value;
259
36
    }
260
8
    prev_value = *ptr;
261
8
  } while (prev_value == old_value);
262
8
  return prev_value;
263
44
}
264
265
inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
266
                                       Atomic64 old_value,
267
22
                                       Atomic64 new_value) {
268
  // The libkern interface does not distinguish between
269
  // Acquire and Release memory barriers; they are equivalent.
270
22
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
271
22
}
272
273
#ifdef __LP64__
274
275
// 64-bit implementation on 64-bit platform
276
277
7.73G
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
278
7.73G
  *ptr = value;
279
7.73G
}
280
281
// Issue the x86 "pause" (or arm64 "yield") instruction, which tells the CPU that we
282
// are in a spinlock wait loop and should allow other hyperthreads
283
// to run, not speculate memory access, etc.
284
9.75G
inline void PauseCPU() {
285
#ifdef __x86_64__
286
  __asm__ __volatile__("pause" : : : "memory");
287
#elif defined(__arm64__)
288
9.75G
  __asm__ __volatile__("yield" : : : "memory");
289
#else
290
  #error "PauseCPU is only supported for x86_64 and arm64 on macOS"
291
#endif
292
9.75G
}
293
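// Sketch of the spin-wait pattern PauseCPU() is intended for (illustrative
// only, not part of the original header). The lock-word convention here
// (0 == unlocked, 1 == held) is an assumption for the example.
//
//   inline void SpinAcquire(volatile Atomic32* lock_word) {
//     while (Acquire_CompareAndSwap(lock_word, 0, 1) != 0) {
//       PauseCPU();   // back off so the sibling hyperthread can make progress
//     }
//   }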
294
6
inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
295
6
  *ptr = value;
296
6
  MemoryBarrier();
297
6
}
298
299
1.99M
inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
300
1.99M
  MemoryBarrier();
301
1.99M
  *ptr = value;
302
1.99M
}
303
304
5.69G
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
305
5.69G
  return *ptr;
306
5.69G
}
307
308
5.98M
inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
309
5.98M
  Atomic64 value = *ptr;
310
5.98M
  MemoryBarrier();
311
5.98M
  return value;
312
5.98M
}
313
314
86.5M
inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
315
86.5M
  MemoryBarrier();
316
86.5M
  return *ptr;
317
86.5M
}
318
319
#else
320
321
// 64-bit implementation on 32-bit platform
322
323
#if defined(__ppc__)
324
325
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
326
  __asm__ __volatile__(
327
      "_NoBarrier_Store_not_supported_for_32_bit_ppc\n\t");
328
}
329
330
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
331
  __asm__ __volatile__(
332
      "_NoBarrier_Load_not_supported_for_32_bit_ppc\n\t");
333
  return 0;
334
}
335
336
#elif defined(__i386__)
337
338
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
339
  __asm__ __volatile__("movq %1, %%mm0\n\t"    // Use mmx reg for 64-bit atomic
340
                       "movq %%mm0, %0\n\t"  // moves (ptr could be read-only)
341
                       "emms\n\t"              // Reset FP registers
342
                       : "=m" (*ptr)
343
                       : "m" (value)
344
                       : // mark the FP stack and mmx registers as clobbered
345
                         "st", "st(1)", "st(2)", "st(3)", "st(4)",
346
                         "st(5)", "st(6)", "st(7)", "mm0", "mm1",
347
                         "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
348
349
}
350
351
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
352
  Atomic64 value;
353
  __asm__ __volatile__("movq %1, %%mm0\n\t"  // Use mmx reg for 64-bit atomic
354
                       "movq %%mm0, %0\n\t"  // moves (ptr could be read-only)
355
                       "emms\n\t"            // Reset FP registers
356
                       : "=m" (value)
357
                       : "m" (*ptr)
358
                       : // mark the FP stack and mmx registers as clobbered
359
                         "st", "st(1)", "st(2)", "st(3)", "st(4)",
360
                         "st(5)", "st(6)", "st(7)", "mm0", "mm1",
361
                         "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
362
363
  return value;
364
}
365
366
#elif defined(__arm__)
367
368
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
369
  int store_failed;
370
  Atomic64 dummy;
371
  __asm__ __volatile__(
372
      "1:\n"
373
      // Dummy load to lock cache line.
374
      "ldrexd  %1, [%3]\n"
375
      "strexd  %0, %2, [%3]\n"
376
      "teq     %0, #0\n"
377
      "bne     1b"
378
      : "=&r" (store_failed), "=&r"(dummy)
379
      : "r"(value), "r" (ptr)
380
      : "cc", "memory");
381
}
382
383
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
384
  Atomic64 res;
385
  __asm__ __volatile__(
386
      "ldrexd   %0, [%1]\n"
387
      "clrex\n"
388
      : "=r" (res)
389
      : "r"(ptr), "Q"(*ptr));
390
  return res;
391
}
392
393
#endif
394
395
396
inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
397
  NoBarrier_Store(ptr, value);
398
  MemoryBarrier();
399
}
400
401
inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
402
  MemoryBarrier();
403
  NoBarrier_Store(ptr, value);
404
}
405
406
inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
407
  Atomic64 value = NoBarrier_Load(ptr);
408
  MemoryBarrier();
409
  return value;
410
}
411
412
inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
413
  MemoryBarrier();
414
  return NoBarrier_Load(ptr);
415
}
416
#endif  // __LP64__
417
418
}   // namespace subtle
419
}   // namespace base
420
421
// NOTE(user): The following is also deprecated.  New callers should use
422
// the base::subtle namespace.
423
0
inline void MemoryBarrier() {
424
0
  base::subtle::MemoryBarrier();
425
0
}
426
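// Preferred spelling for new call sites, per the note above (illustrative,
// not part of the original file):
//
//   base::subtle::MemoryBarrier();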
427
#endif  // YB_GUTIL_ATOMICOPS_INTERNALS_MACOSX_H