YugabyteDB (2.13.0.0-b42, bfc6a6643e7399ac8a0e81d06a3ee6d6571b33ab)

Coverage Report

Created: 2022-03-09 17:30

/Users/deen/code/yugabyte-db/src/yb/util/locks.h
Line
Count
Source (jump to first uncovered line)
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
//
18
// The following only applies to changes made to this file as part of YugaByte development.
19
//
20
// Portions Copyright (c) YugaByte, Inc.
21
//
22
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
23
// in compliance with the License.  You may obtain a copy of the License at
24
//
25
// http://www.apache.org/licenses/LICENSE-2.0
26
//
27
// Unless required by applicable law or agreed to in writing, software distributed under the License
28
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
29
// or implied.  See the License for the specific language governing permissions and limitations
30
// under the License.
31
//
32
#ifndef YB_UTIL_LOCKS_H
33
#define YB_UTIL_LOCKS_H
34
35
#include <algorithm>
36
#include <mutex>
37
38
#include <glog/logging.h>
39
40
#include "yb/gutil/atomicops.h"
41
#include "yb/gutil/dynamic_annotations.h"
42
#include "yb/gutil/macros.h"
43
#include "yb/gutil/port.h"
44
#include "yb/gutil/spinlock.h"
45
#include "yb/gutil/sysinfo.h"
46
#include "yb/gutil/thread_annotations.h"
47
48
#include "yb/util/errno.h"
49
#include "yb/util/rw_semaphore.h"
50
51
namespace yb {
52
53
using base::subtle::Acquire_CompareAndSwap;
54
using base::subtle::NoBarrier_Load;
55
using base::subtle::Release_Store;
56
57
// Wrapper around the Google SpinLock class to adapt it to the method names
58
// expected by Boost.
59
class CAPABILITY("mutex") simple_spinlock {
60
 public:
61
154M
  simple_spinlock() {}
62
63
1.32G
  void lock() ACQUIRE() {
64
1.32G
    l_.Lock();
65
1.32G
  }
66
67
1.32G
  void unlock() RELEASE() {
68
1.32G
    l_.Unlock();
69
1.32G
  }
70
71
628
  bool try_lock() TRY_ACQUIRE(true) {
72
628
    return l_.TryLock();
73
628
  }
74
75
  // Return whether the lock is currently held.
76
  //
77
  // This state can change at any instant, so this is only really useful
78
  // for assertions where you expect to hold the lock. The success of
79
  // such an assertion isn't a guarantee that the current thread is the
80
  // holder, but the failure of such an assertion _is_ a guarantee that
81
  // the current thread is _not_ holding the lock!
82
105M
  bool is_locked() {
83
105M
    return l_.IsHeld();
84
105M
  }
85
86
 private:
87
  base::SpinLock l_;
88
89
  DISALLOW_COPY_AND_ASSIGN(simple_spinlock);
90
};
91
92
// A simple_spinlock padded out so the struct spans a whole cache line,
// preventing false sharing between adjacent locks in an array.
struct padded_spinlock : public simple_spinlock {
  // Bytes needed to round sizeof(simple_spinlock) up to the next cache-line
  // boundary. Note this is always >= 1 (a full extra CACHELINE_SIZE when the
  // size is already a multiple), so the array member is never zero-length.
  static constexpr size_t kPaddingSize =
      CACHELINE_SIZE - (sizeof(simple_spinlock) % CACHELINE_SIZE);
  char padding[kPaddingSize];
};
97
98
// Reader-writer lock.
// This is functionally equivalent to rw_semaphore in rw_semaphore.h, but should be
// used whenever the lock is expected to only be acquired on a single thread.
// It adds TSAN annotations which will detect misuse of the lock, but those
// annotations also assume that the same thread that takes the lock will unlock it.
//
// See rw_semaphore.h for documentation on the individual methods where unclear.
class CAPABILITY("mutex") rw_spinlock  {
 public:
  rw_spinlock() {
    ANNOTATE_RWLOCK_CREATE(this);
  }
  ~rw_spinlock() {
    ANNOTATE_RWLOCK_DESTROY(this);
  }

  // Acquire in shared (reader) mode. The TSAN annotation is made only after
  // the acquisition succeeds (second annotation arg 0 == reader).
  void lock_shared() {
    sem_.lock_shared();
    ANNOTATE_RWLOCK_ACQUIRED(this, 0);
  }

  // Release a shared hold. Annotated before releasing, so TSAN sees the lock
  // as held for the entire critical section.
  void unlock_shared() {
    ANNOTATE_RWLOCK_RELEASED(this, 0);
    sem_.unlock_shared();
  }

  // Non-blocking exclusive (writer) acquisition attempt; annotates only on
  // success (second annotation arg 1 == writer).
  bool try_lock() {
    bool ret = sem_.try_lock();
    if (ret) {
      ANNOTATE_RWLOCK_ACQUIRED(this, 1);
    }
    return ret;
  }

  // Acquire in exclusive (writer) mode.
  void lock() {
    sem_.lock();
    ANNOTATE_RWLOCK_ACQUIRED(this, 1);
  }

  // Release an exclusive hold.
  void unlock() {
    ANNOTATE_RWLOCK_RELEASED(this, 1);
    sem_.unlock();
  }

  // Whether the lock is currently held in exclusive mode. This can change at
  // any instant; see simple_spinlock::is_locked() for the caveats.
  bool is_write_locked() const {
    return sem_.is_write_locked();
  }

  // Whether the lock is currently held in any mode. Same caveats as above.
  bool is_locked() const {
    return sem_.is_locked();
  }

 private:
  rw_semaphore sem_;
};
153
154
// A reader-writer lock implementation which is biased for use cases where
155
// the write lock is taken infrequently, but the read lock is used often.
156
//
157
// Internally, this creates N underlying mutexes, one per CPU. When a thread
158
// wants to lock in read (shared) mode, it locks only its own CPU's mutex. When it
159
// wants to lock in write (exclusive) mode, it locks all CPU's mutexes.
160
//
161
// This means that in the read-mostly case, different readers will not cause any
162
// cacheline contention.
163
//
164
// Usage:
165
//   percpu_rwlock mylock;
166
//
167
//   // Lock shared:
168
//   {
169
//     SharedLock<rw_spinlock> lock(mylock.get_lock());
170
//     ...
171
//   }
172
//
173
//   // Lock exclusive:
174
//
175
//   {
176
//     boost::lock_guard<percpu_rwlock> lock(mylock);
177
//     ...
178
//   }
179
class percpu_rwlock {
180
 public:
181
110k
  percpu_rwlock() {
182
110k
    errno = 0;
183
110k
    n_cpus_ = base::MaxCPUIndex() + 1;
184
0
    CHECK_EQ(errno, 0) << ErrnoToString(errno);
185
110k
    CHECK_GT(n_cpus_, 0);
186
110k
    locks_ = new padded_lock[n_cpus_];
187
110k
  }
188
189
51.7k
  ~percpu_rwlock() {
190
51.7k
    delete [] locks_;
191
51.7k
  }
192
193
34.8M
  rw_spinlock &get_lock() {
194
34.8M
#if defined(__APPLE__)
195
    // OSX doesn't have a way to get the CPU, so we'll pick a random one.
196
34.8M
    int cpu = reinterpret_cast<uintptr_t>(this) % n_cpus_;
197
#else
198
    int cpu = sched_getcpu();
199
    if (cpu < 0) {
200
      LOG(FATAL) << ErrnoToString(errno);
201
    }
202
    CHECK_LT(cpu, n_cpus_);
203
#endif  // defined(__APPLE__)
204
34.8M
    return locks_[cpu].lock;
205
34.8M
  }
206
207
0
  bool try_lock() {
208
0
    for (int i = 0; i < n_cpus_; i++) {
209
0
      if (!locks_[i].lock.try_lock()) {
210
0
        while (i--) {
211
0
          locks_[i].lock.unlock();
212
0
        }
213
0
        return false;
214
0
      }
215
0
    }
216
0
    return true;
217
0
  }
218
219
  // Return true if this lock is held on any CPU.
220
  // See simple_spinlock::is_locked() for details about where this is useful.
221
0
  bool is_locked() const {
222
0
    for (int i = 0; i < n_cpus_; i++) {
223
0
      if (locks_[i].lock.is_locked()) return true;
224
0
    }
225
0
    return false;
226
0
  }
227
228
279k
  void lock() {
229
3.07M
    for (int i = 0; i < n_cpus_; i++) {
230
2.79M
      locks_[i].lock.lock();
231
2.79M
    }
232
279k
  }
233
234
279k
  void unlock() {
235
3.07M
    for (int i = 0; i < n_cpus_; i++) {
236
2.79M
      locks_[i].lock.unlock();
237
2.79M
    }
238
279k
  }
239
240
  // Returns the memory usage of this object without the object itself. Should
241
  // be used when embedded inside another object.
242
  size_t memory_footprint_excluding_this() const;
243
244
  // Returns the memory usage of this object including the object itself.
245
  // Should be used when allocated on the heap.
246
  size_t memory_footprint_including_this() const;
247
248
 private:
249
  struct padded_lock {
250
    rw_spinlock lock;
251
    static constexpr size_t kPaddingSize = CACHELINE_SIZE - (sizeof(rw_spinlock) % CACHELINE_SIZE);
252
    char padding[kPaddingSize];
253
  };
254
255
  int n_cpus_;
256
  padded_lock *locks_;
257
};
258
259
// Simple implementation of the SharedLock API, which is not available in
// the standard library until C++14. Defers error checking to the underlying
// mutex.
template <typename Mutex>
class shared_lock {
 public:
  // Default construction: associated with no mutex, owns nothing.
  shared_lock()
      : m_(nullptr) {
  }

  // Blocking shared acquisition of 'm'; held until destruction.
  explicit shared_lock(Mutex& m) // NOLINT
      : m_(&m) {
    m_->lock_shared();
  }

  // Non-blocking shared acquisition attempt. On failure this object owns
  // nothing, which the caller can detect through owns_lock().
  shared_lock(Mutex& m, std::try_to_lock_t t) // NOLINT
      : m_(nullptr) {
    if (m.try_lock_shared()) {
      m_ = &m;
    }
  }

  // Whether this object currently holds a shared lock.
  bool owns_lock() const {
    return m_ != nullptr;
  }

  // Exchange state (mutex association and ownership) with 'other'.
  void swap(shared_lock& other) {
    std::swap(m_, other.m_);
  }

  // Releases the shared hold, if any.
  ~shared_lock() {
    if (m_ != nullptr) {
      m_->unlock_shared();
    }
  }

 private:
  // The mutex held in shared mode, or nullptr when nothing is owned.
  Mutex* m_;

  shared_lock(const shared_lock&) = delete;
  void operator=(const shared_lock&) = delete;
};
300
301
// Snapshots a mutex-protected container into a std::vector. The copy is made
// while holding '*mutex', so the result is a consistent view even if other
// threads mutate the container afterwards.
template <class Container>
auto ToVector(const Container& container, std::mutex* mutex) {
  std::lock_guard<std::mutex> guard(*mutex);
  return std::vector<typename Container::value_type>(container.begin(), container.end());
}
311
312
// Acquires '*mutex', waiting at most 'duration'. A duration of max() means
// "no timeout" and performs an unconditional blocking lock. With a finite
// timeout the returned unique_lock may not own the mutex — callers must
// check owns_lock().
template <class Mutex, class Rep, class Period>
std::unique_lock<Mutex> LockMutex(Mutex* mutex, std::chrono::duration<Rep, Period> duration) {
  using Timeout = std::chrono::duration<Rep, Period>;
  if (duration != Timeout::max()) {
    // Timed acquisition via the mutex's try_lock_for.
    return std::unique_lock<Mutex>(*mutex, duration);
  }
  // An "infinite" timeout degenerates to a plain blocking lock.
  return std::unique_lock<Mutex>(*mutex);
}
320
321
// Deadline overload: acquires '*mutex', waiting until 'time' at the latest.
// A deadline of max() means "no deadline" and blocks unconditionally. With a
// finite deadline the returned unique_lock may not own the mutex — callers
// must check owns_lock().
template <class Mutex, class Clock, class Duration>
std::unique_lock<Mutex> LockMutex(Mutex* mutex, std::chrono::time_point<Clock, Duration> time) {
  using Deadline = std::chrono::time_point<Clock, Duration>;
  if (time != Deadline::max()) {
    // Timed acquisition via the mutex's try_lock_until.
    return std::unique_lock<Mutex>(*mutex, time);
  }
  // An "infinite" deadline degenerates to a plain blocking lock.
  return std::unique_lock<Mutex>(*mutex);
}
329
330
// RAII helper that temporarily releases an already-held lock: the
// constructor unlocks it and the destructor re-acquires it. Useful for
// dropping a lock around a blocking operation in the middle of a locked
// region without restructuring the surrounding scope.
template <class Lock>
class ReverseLock {
 public:
  ReverseLock(const ReverseLock&) = delete;
  void operator=(const ReverseLock&) = delete;

  // 'lock' must be held on entry; it is released immediately.
  explicit ReverseLock(Lock& lock) : guard_(lock) {
    guard_.unlock();
  }

  // Re-acquires the lock before control returns to the scope that held it.
  ~ReverseLock() {
    guard_.lock();
  }

 private:
  Lock& guard_;
};
347
348
} // namespace yb
349
350
#endif // YB_UTIL_LOCKS_H