YugabyteDB (2.13.1.0-b60, 21121d69985fbf76aa6958d8f04a9bfa936293b5)

Coverage Report

Created: 2022-03-22 16:43

/Users/deen/code/yugabyte-db/src/yb/gutil/spinlock.cc
Line
Count
Source
1
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
2
/* Copyright (c) 2006, Google Inc.
3
 * All rights reserved.
4
 *
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions are
7
 * met:
8
 *
9
 *     * Redistributions of source code must retain the above copyright
10
 * notice, this list of conditions and the following disclaimer.
11
 *     * Redistributions in binary form must reproduce the above
12
 * copyright notice, this list of conditions and the following disclaimer
13
 * in the documentation and/or other materials provided with the
14
 * distribution.
15
 *     * Neither the name of Google Inc. nor the names of its
16
 * contributors may be used to endorse or promote products derived from
17
 * this software without specific prior written permission.
18
 *
19
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
 *
31
 * The following only applies to changes made to this file as part of YugaByte development.
32
 *
33
 * Portions Copyright (c) YugaByte, Inc.
34
 *
35
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
36
 * in compliance with the License.  You may obtain a copy of the License at
37
 *
38
 * http://www.apache.org/licenses/LICENSE-2.0
39
 *
40
 * Unless required by applicable law or agreed to in writing, software distributed under the License
41
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
42
 * or implied.  See the License for the specific language governing permissions and limitations
43
 * under the License.
44
 *
45
 *
46
 * ---
47
 * Author: Sanjay Ghemawat
48
 */
49
50
#include "yb/gutil/spinlock.h"
51
#include "yb/gutil/synchronization_profiling.h"
52
#include "yb/gutil/spinlock_internal.h"
53
#include "yb/gutil/walltime.h"
54
#include "yb/gutil/sysinfo.h"
55
56
namespace base {
57
58
// NOTE on the Lock-state values:
59
//
60
//   kSpinLockFree represents the unlocked state
61
//   kSpinLockHeld represents the locked state with no waiters
62
//
63
// Values greater than kSpinLockHeld represent the locked state with waiters,
64
// where the value is the time the current lock holder had to
65
// wait before obtaining the lock.  The kSpinLockSleeper state is a special
66
// "locked with waiters" state that indicates that a sleeper needs to
67
// be woken, but the thread that just released the lock didn't wait.
68
69
// Number of busy-spin iterations SpinLoop() performs before a waiter falls
// back to an OS delay.  Starts at 0 (no adaptive spinning) and is raised to
// 1000 by SpinLock_InitHelper's constructor on multi-CPU machines.
static int adaptive_spin_count = 0;

const base::LinkerInitialized SpinLock::LINKER_INITIALIZED =
    base::LINKER_INITIALIZED;
73
74
namespace {
75
struct SpinLock_InitHelper {
76
32.7k
  SpinLock_InitHelper() {
77
    // On multi-cpu machines, spin for longer before yielding
78
    // the processor or sleeping.  Reduces idle time significantly.
79
    // We use the default num CPU value because gflags are initialized after
80
    // the static call to this class.
81
32.7k
    if (base::RawNumCPUs() > 1) {
82
32.7k
      adaptive_spin_count = 1000;
83
32.7k
    }
84
32.7k
  }
85
};
86
87
// Hook into global constructor execution:
88
// We do not do adaptive spinning before that,
89
// but nothing lock-intensive should be going on at that time.
90
static SpinLock_InitHelper init_helper;
91
92
}  // unnamed namespace
93
94
// Monitor the lock to see if its value changes within some time period
// (adaptive_spin_count loop iterations).  A timestamp indicating
// when the thread initially started waiting for the lock is passed in via
// the initial_wait_timestamp value.  The total wait time in cycles for the
// lock is returned in the wait_cycles parameter.  The last value read
// from the lock is returned from the method.
Atomic32 SpinLock::SpinLoop(int64 initial_wait_timestamp,
                            Atomic32* wait_cycles) {
  // Busy-spin (with a CPU pause hint) until the lock looks free or the
  // adaptive spin budget is exhausted.  NoBarrier_Load is sufficient here:
  // the acquire ordering comes from the CAS below.
  int c = adaptive_spin_count;
  while (base::subtle::NoBarrier_Load(&lockword_) != kSpinLockFree && --c > 0) {
    base::subtle::PauseCPU();
  }
  // Compute the scaled wait time first so it can double as the stored lock
  // value if the CAS succeeds (values > kSpinLockHeld encode the winner's
  // wait time; see the lock-state notes at the top of this file).
  Atomic32 spin_loop_wait_cycles = CalculateWaitCycles(initial_wait_timestamp);
  Atomic32 lock_value =
      base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree,
                                           spin_loop_wait_cycles);
  *wait_cycles = spin_loop_wait_cycles;
  // Returns kSpinLockFree if we acquired the lock, otherwise the value
  // observed in the lockword.
  return lock_value;
}
113
114
28.1M
// Contended-path acquire: called after the fast-path CAS in Lock() failed.
// Alternates between spinning (SpinLoop) and an OS-specific delay
// (SpinLockDelay) until the lock is obtained.
void SpinLock::SlowLock() {
  // The lock was not obtained initially, so this thread needs to wait for
  // it.  Record the current timestamp in the local variable wait_start_time
  // so the total wait time can be stored in the lockword once this thread
  // obtains the lock.
  int64 wait_start_time = CycleClock::Now();
  Atomic32 wait_cycles;
  Atomic32 lock_value = SpinLoop(wait_start_time, &wait_cycles);

  int lock_wait_call_count = 0;
  while (lock_value != kSpinLockFree) {
    // If the lock is currently held, but not marked as having a sleeper, mark
    // it as having a sleeper.
    if (lock_value == kSpinLockHeld) {
      // Here, just "mark" that the thread is going to sleep.  Don't store the
      // lock wait time in the lock as that will cause the current lock
      // owner to think it experienced contention.
      lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_,
                                                        kSpinLockHeld,
                                                        kSpinLockSleeper);
      if (lock_value == kSpinLockHeld) {
        // Successfully transitioned to kSpinLockSleeper.  Pass
        // kSpinLockSleeper to the SpinLockWait routine to properly indicate
        // the last lock_value observed.
        lock_value = kSpinLockSleeper;
      } else if (lock_value == kSpinLockFree) {
        // Lock is free again, so try and acquire it before sleeping.  The
        // new lock state will be the number of cycles this thread waited if
        // this thread obtains the lock.
        lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_,
                                                          kSpinLockFree,
                                                          wait_cycles);
        continue;  // skip the delay at the end of the loop
      }
    }

    // Wait for an OS specific delay.
    yb::base::internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count);
    // Spin again after returning from the wait routine to give this thread
    // some chance of obtaining the lock.
    lock_value = SpinLoop(wait_start_time, &wait_cycles);
  }
}
157
158
// The wait time for contentionz lock profiling must fit into 32 bits.
// However, the lower 32-bits of the cycle counter wrap around too quickly
// with high frequency processors, so a right-shift by 7 is performed to
// quickly divide the cycles by 128.  Using these 32 bits, reduces the
// granularity of time measurement to 128 cycles, and loses track
// of wait time for waits greater than 109 seconds on a 5 GHz machine
// [(2^32 cycles/5 Ghz)*128 = 109.95 seconds]. Waits this long should be
// very rare and the reduced granularity should not be an issue given
// processors in the Google fleet operate at a minimum of one billion
// cycles/sec.
enum { PROFILE_TIMESTAMP_SHIFT = 7 };
170
38.0M
// Contended-path release: wake a sleeping waiter (if any) and report the
// observed wait time to the contentionz profiler.
void SpinLock::SlowUnlock(uint64 wait_cycles) {
  // Wake one waiter if necessary.
  yb::base::internal::SpinLockWake(&lockword_, false);

  // wait_cycles <= kSpinLockSleeper means no wait was actually performed,
  // so there is nothing to report.
  if (wait_cycles <= kSpinLockSleeper) {
    return;
  }

  // Expand the stored (right-shifted) wait time back out to full cycles.
  // Note that CalculateWaitCycles ORs in kSpinLockSleeper unconditionally so
  // the stored value can never collide with kSpinLockFree or kSpinLockHeld;
  // those extra cycles slightly overestimate the contention about half the
  // time, while subtracting them back out would underestimate it just as
  // often.  Both are wrong, so the code keeps the conservative (and simpler)
  // overestimate.
  gutil::SubmitSpinLockProfileData(this,
                                   wait_cycles << PROFILE_TIMESTAMP_SHIFT);
}
191
192
51.1M
// Convert the elapsed wait into the 32-bit value stored in the lockword:
// cycles since wait_start_time, scaled down by 2^PROFILE_TIMESTAMP_SHIFT.
inline int32 SpinLock::CalculateWaitCycles(int64 wait_start_time) {
  const int64 elapsed_cycles = CycleClock::Now() - wait_start_time;
  // The scaled count serves as both the profiling wait time and the lockword
  // value, so it must never equal kSpinLockFree or kSpinLockHeld; ORing in
  // kSpinLockSleeper guarantees the result is at least kSpinLockSleeper.
  return static_cast<int32>(elapsed_cycles >> PROFILE_TIMESTAMP_SHIFT) |
         kSpinLockSleeper;
}
202
203
} // namespace base