YugabyteDB (2.13.0.0-b42, bfc6a6643e7399ac8a0e81d06a3ee6d6571b33ab)

Coverage Report

Created: 2022-03-09 17:30

/Users/deen/code/yugabyte-db/src/yb/docdb/shared_lock_manager.cc
Line
Count
Source (jump to first uncovered line)
1
// Copyright (c) YugaByte, Inc.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
4
// in compliance with the License.  You may obtain a copy of the License at
5
//
6
// http://www.apache.org/licenses/LICENSE-2.0
7
//
8
// Unless required by applicable law or agreed to in writing, software distributed under the License
9
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
10
// or implied.  See the License for the specific language governing permissions and limitations
11
// under the License.
12
//
13
14
#include "yb/docdb/shared_lock_manager.h"
15
16
#include <array>
17
#include <condition_variable>
18
#include <mutex>
19
#include <unordered_map>
20
#include <vector>
21
22
#include <boost/range/adaptor/reversed.hpp>
23
#include <glog/logging.h>
24
25
#include "yb/docdb/lock_batch.h"
26
27
#include "yb/util/enums.h"
28
#include "yb/util/ref_cnt_buffer.h"
29
#include "yb/util/scope_exit.h"
30
#include "yb/util/trace.h"
31
32
using std::string;
33
34
namespace yb {
35
namespace docdb {
36
37
namespace {
38
39
// Lock state stores the number of locks acquired for each intent type.
// The count for each intent type resides in a block of sequential bits in the lock state.
// For example, the count of locks on a particular intent type can be obtained as:
// (lock_state >> kIntentTypeShift[type]) & kSingleIntentMask.

// We have 128 bits in LockState and 6 types of intents. So 21 bits is the max number of bits
// that we could reserve for the block of a single intent type.
const size_t kIntentTypeBits = 16;
const LockState kSingleIntentMask = (static_cast<LockState>(1) << kIntentTypeBits) - 1;
48
49
1.44M
// Returns true when holding `lhs` and `rhs` simultaneously is not allowed.
// The rules are the following:
// 1) At least one intent should be strong for conflict.
// 2) Read and write conflict only with the opposite access type.
bool IntentTypesConflict(IntentType lhs, IntentType rhs) {
  const auto lhs_value = to_underlying(lhs);
  const auto rhs_value = to_underlying(rhs);
  const bool any_strong =
      ((lhs_value & kStrongIntentFlag) | (rhs_value & kStrongIntentFlag)) != 0;
  const bool opposite_access =
      (lhs_value & kWriteIntentFlag) != (rhs_value & kWriteIntentFlag);
  return any_strong && opposite_access;
}
58
59
599k
LockState IntentTypeMask(IntentType intent_type) {
60
599k
  return kSingleIntentMask << (to_underlying(intent_type) * kIntentTypeBits);
61
599k
}
62
63
// Generate conflict mask for all possible subsets of intent type set.
64
11.3k
std::array<LockState, kIntentTypeSetMapSize> GenerateConflicts() {
65
11.3k
  std::array<LockState, kIntentTypeSetMapSize> result;
66
192k
  for (size_t idx = 0; idx != kIntentTypeSetMapSize; ++idx) {
67
181k
    result[idx] = 0;
68
362k
    for (auto intent_type : IntentTypeSet(idx)) {
69
1.44M
      for (auto other_intent_type : kIntentTypeList) {
70
1.44M
        if (IntentTypesConflict(intent_type, other_intent_type)) {
71
543k
          result[idx] |= IntentTypeMask(other_intent_type);
72
543k
        }
73
1.44M
      }
74
362k
    }
75
181k
  }
76
11.3k
  return result;
77
11.3k
}
78
79
// Generate array for all possible subsets of intent type set.
80
// The entry is combination of single_intent_mask for intents from set.
81
22.6k
std::array<LockState, kIntentTypeSetMapSize> GenerateByMask(LockState single_intent_mask) {
82
22.6k
  DCHECK_EQ(single_intent_mask & kSingleIntentMask, single_intent_mask);
83
22.6k
  std::array<LockState, kIntentTypeSetMapSize> result;
84
384k
  for (size_t idx = 0; idx != kIntentTypeSetMapSize; ++idx) {
85
362k
    result[idx] = 0;
86
724k
    for (auto intent_type : IntentTypeSet(idx)) {
87
724k
      result[idx] |= single_intent_mask << (to_underlying(intent_type) * kIntentTypeBits);
88
724k
    }
89
362k
  }
90
22.6k
  return result;
91
22.6k
}
92
93
// Per intent-type-set increment: adds 1 to the counter block of every intent
// type present in the set (used by Lock/Unlock to bump/drop holder counts).
const std::array<LockState, kIntentTypeSetMapSize> kIntentTypeSetAdd = GenerateByMask(1);

} // namespace
96
97
0
// Two intent type sets conflict iff any pair of their members conflicts.
bool IntentTypeSetsConflict(IntentTypeSet lhs, IntentTypeSet rhs) {
  for (auto left_intent : lhs) {
    for (auto right_intent : rhs) {
      if (IntentTypesConflict(left_intent, right_intent)) {
        return true;
      }
    }
  }
  return false;
}
107
108
// Per-key lock entry. Shared by all lock batches that currently reference the same key.
struct LockedBatchEntry {
  // Taken only for short duration, with no blocking wait.
  mutable std::mutex mutex;

  // Notified by Unlock when a counter block may have dropped to zero, so blocked
  // Lock calls can re-check for conflicts.
  std::condition_variable cond_var;

  // Refcounting for garbage collection. Can only be used while the global mutex is locked.
  // Global mutex resides in lock manager and is the same for all LockBatchEntries.
  size_t ref_count = 0;

  // Number of holders for each intent type, packed into a single atomic LockState
  // (kIntentTypeBits bits per type).
  std::atomic<LockState> num_holding{0};

  // Number of threads currently waiting in Lock; Unlock skips notification when zero.
  std::atomic<size_t> num_waiters{0};

  // Blocks until the requested intent type set has no conflicting holders, or the
  // deadline passes. Returns false on timeout.
  MUST_USE_RESULT bool Lock(IntentTypeSet lock, CoarseTimePoint deadline);

  // Releases a previously acquired intent type set, waking waiters when appropriate.
  void Unlock(IntentTypeSet lock);

  std::string ToString() const {
    std::lock_guard<std::mutex> lock(mutex);
    return Format("{ ref_count: $0 num_holding: $1 num_waiters: $2 }",
                  ref_count, num_holding.load(std::memory_order_acquire),
                  num_waiters.load(std::memory_order_acquire));
  }
};
134
135
// Implementation of SharedLockManager (pimpl).
class SharedLockManager::Impl {
 public:
  // Acquires the whole batch in order. On deadline expiry, rolls back entries
  // already acquired and returns false.
  MUST_USE_RESULT bool Lock(LockBatchEntries* key_to_intent_type, CoarseTimePoint deadline);

  // Releases the whole batch (in reverse order) and drops reservations.
  void Unlock(const LockBatchEntries& key_to_intent_type);

  ~Impl() {
    std::lock_guard<std::mutex> lock(global_mutex_);
    LOG_IF(DFATAL, !locks_.empty()) << "Locks not empty in dtor: " << yb::ToString(locks_);
  }

 private:
  typedef std::unordered_map<RefCntPrefix, LockedBatchEntry*, RefCntPrefixHash> LockEntryMap;

  // Make sure the entries exist in the locks_ map and return pointers so we can access
  // them without holding the global lock. Returns a vector with pointers in the same order
  // as the keys in the batch.
  void Reserve(LockBatchEntries* batch);

  // Update refcounts and maybe collect garbage.
  void Cleanup(const LockBatchEntries& key_to_intent_type);

  // The global mutex should be taken only for very short duration, with no blocking wait.
  std::mutex global_mutex_;

  LockEntryMap locks_ GUARDED_BY(global_mutex_);
  // Cache of lock entries, to avoid allocation/deallocation of heavy LockedBatchEntry.
  std::vector<std::unique_ptr<LockedBatchEntry>> lock_entries_ GUARDED_BY(global_mutex_);
  std::vector<LockedBatchEntry*> free_lock_entries_ GUARDED_BY(global_mutex_);
};
164
165
// For each intent type set: mask covering the full counter blocks of its members.
const std::array<LockState, kIntentTypeSetMapSize> kIntentTypeSetMask = GenerateByMask(
    kSingleIntentMask);

// For each intent type set: mask of the counter blocks of every intent type that
// conflicts with at least one member of the set.
const std::array<LockState, kIntentTypeSetMapSize> kIntentTypeSetConflicts = GenerateConflicts();
169
170
0
// Renders a LockState as "{IntentA, IntentB, ...}" listing each intent type
// whose holder counter block is non-zero.
std::string SharedLockManager::ToString(const LockState& state) {
  std::string result = "{";
  bool first = true;
  for (auto type : kIntentTypeList) {
    if ((state & IntentTypeMask(type)) == 0) {
      continue;
    }
    if (!first) {
      result += ", ";
    }
    first = false;
    result += docdb::ToString(type);
  }
  result += "}";
  return result;
}
186
187
14.6M
// Acquires the given intent type set on this entry, blocking (up to deadline)
// while any conflicting intent type is held. Returns false on timeout.
//
// Fast path: a lock-free CAS loop on num_holding. Slow path: register as a
// waiter and sleep on cond_var; Unlock's lock/unlock of `mutex` before
// notify_all guarantees we cannot miss a wake-up between the re-check and the
// wait below.
bool LockedBatchEntry::Lock(IntentTypeSet lock_type, CoarseTimePoint deadline) {
  size_t type_idx = lock_type.ToUIntPtr();
  auto& num_holding = this->num_holding;
  auto old_value = num_holding.load(std::memory_order_acquire);
  auto add = kIntentTypeSetAdd[type_idx];
  for (;;) {
    if ((old_value & kIntentTypeSetConflicts[type_idx]) == 0) {
      // No conflicting intent currently held: try to bump our counters atomically.
      auto new_value = old_value + add;
      if (num_holding.compare_exchange_weak(old_value, new_value, std::memory_order_acq_rel)) {
        return true;
      }
      // CAS failed; old_value has been refreshed - redo the conflict check.
      continue;
    }
    // Conflict detected: become a waiter. The counter is decremented on every
    // exit path (timeout or retry) via ScopeExit.
    num_waiters.fetch_add(1, std::memory_order_release);
    auto se = ScopeExit([this] {
      num_waiters.fetch_sub(1, std::memory_order_release);
    });
    std::unique_lock<std::mutex> lock(mutex);
    // Re-check under the mutex; Unlock serializes with this check through the
    // same mutex before notifying, so the wait below cannot miss a release.
    old_value = num_holding.load(std::memory_order_acquire);
    if ((old_value & kIntentTypeSetConflicts[type_idx]) != 0) {
      if (deadline != CoarseTimePoint::max()) {
        if (cond_var.wait_until(lock, deadline) == std::cv_status::timeout) {
          return false;
        }
      } else {
        cond_var.wait(lock);
      }
    }
  }
}
217
218
14.7M
// Releases the given intent type set on this entry and wakes waiters when a
// counter block reached zero (the only case that can unblock anyone).
void LockedBatchEntry::Unlock(IntentTypeSet lock_types) {
  size_t type_idx = lock_types.ToUIntPtr();
  auto sub = kIntentTypeSetAdd[type_idx];

  // Have to emulate fetch_sub here, because GCC 5.5 don't have it for int128
  auto old_state = num_holding.load(std::memory_order_acquire);
  LockState new_state;
  for (;;) {
    new_state = old_state - sub;
    if (num_holding.compare_exchange_weak(old_state, new_state, std::memory_order_acq_rel)) {
      break;
    }
  }

  // No threads are waiting on this entry, so notification is unnecessary.
  if (!num_waiters.load(std::memory_order_acquire)) {
    return;
  }

  // Check whether any of the released intent types now has a zero holder count.
  bool has_zero = false;
  for (auto intent_type : lock_types) {
    if (!(new_state & IntentTypeMask(intent_type))) {
      has_zero = true;
      break;
    }
  }

  // At least one of counters should become 0 to unblock waiting locks.
  if (!has_zero) {
    return;
  }

  {
    // Lock/unlock mutex as a barrier for Lock.
    // So we don't unlock and notify between check and wait in Lock.
    std::lock_guard<std::mutex> lock(mutex);
  }

  cond_var.notify_all();
}
257
258
1.74M
// Acquires every entry of the batch in order. On deadline expiry while acquiring
// one entry, unwinds all previously acquired entries in reverse order, drops the
// reservations, and returns false.
bool SharedLockManager::Impl::Lock(LockBatchEntries* key_to_intent_type, CoarseTimePoint deadline) {
  TRACE("Locking a batch of $0 keys", key_to_intent_type->size());
  // Pin a LockedBatchEntry for every key so we can lock without the global mutex.
  Reserve(key_to_intent_type);
  for (auto it = key_to_intent_type->begin(); it != key_to_intent_type->end(); ++it) {
    const auto& key_and_intent_type = *it;
    const auto intent_types = key_and_intent_type.intent_types;
    VLOG(4) << "Locking " << yb::ToString(intent_types) << ": "
            << key_and_intent_type.key.as_slice().ToDebugHexString();
    if (!key_and_intent_type.locked->Lock(intent_types, deadline)) {
      // Timed out on this entry: roll back everything acquired so far, walking
      // the iterator backwards to the beginning.
      while (it != key_to_intent_type->begin()) {
        --it;
        it->locked->Unlock(it->intent_types);
      }
      Cleanup(*key_to_intent_type);
      return false;
    }
  }
  TRACE("Acquired a lock batch of $0 keys", key_to_intent_type->size());

  return true;
}
279
280
1.74M
// Under the global mutex, maps every key of the batch to a LockedBatchEntry
// (reusing cached entries when possible), bumps its refcount, and stores the
// pointer in the batch so later locking needs no global mutex.
void SharedLockManager::Impl::Reserve(LockBatchEntries* key_to_intent_type) {
  std::lock_guard<std::mutex> lock(global_mutex_);
  for (auto& entry : *key_to_intent_type) {
    auto& slot = locks_[entry.key];
    if (!slot) {
      // Key not present yet: take an entry from the free list, or allocate one.
      if (free_lock_entries_.empty()) {
        lock_entries_.emplace_back(std::make_unique<LockedBatchEntry>());
        slot = lock_entries_.back().get();
      } else {
        slot = free_lock_entries_.back();
        free_lock_entries_.pop_back();
      }
    }
    ++slot->ref_count;
    entry.locked = slot;
  }
}
297
298
1.74M
void SharedLockManager::Impl::Unlock(const LockBatchEntries& key_to_intent_type) {
299
1.74M
  TRACE("Unlocking a batch of $0 keys", key_to_intent_type.size());
300
301
14.7M
  for (const auto& key_and_intent_type : boost::adaptors::reverse(key_to_intent_type)) {
302
14.7M
    key_and_intent_type.locked->Unlock(key_and_intent_type.intent_types);
303
14.7M
  }
304
305
1.74M
  Cleanup(key_to_intent_type);
306
1.74M
}
307
308
1.74M
void SharedLockManager::Impl::Cleanup(const LockBatchEntries& key_to_intent_type) {
309
1.74M
  std::lock_guard<std::mutex> lock(global_mutex_);
310
14.7M
  for (const auto& item : key_to_intent_type) {
311
14.7M
    if (--(item.locked->ref_count) == 0) {
312
13.5M
      locks_.erase(item.key);
313
13.5M
      free_lock_entries_.push_back(item.locked);
314
13.5M
    }
315
14.7M
  }
316
1.74M
}
317
318
89.2k
// Creates the implementation object (pimpl idiom); impl_ is declared in the header.
SharedLockManager::SharedLockManager() : impl_(new Impl) {
}
320
321
48.1k
// Defined out-of-line so Impl is a complete type where impl_ is destroyed.
SharedLockManager::~SharedLockManager() {}
322
323
1.74M
// Forwards to Impl::Lock; returns false if the deadline expired before the
// whole batch could be acquired.
bool SharedLockManager::Lock(LockBatchEntries* key_to_intent_type, CoarseTimePoint deadline) {
  return impl_->Lock(key_to_intent_type, deadline);
}
326
327
1.74M
// Forwards to Impl::Unlock.
void SharedLockManager::Unlock(const LockBatchEntries& key_to_intent_type) {
  impl_->Unlock(key_to_intent_type);
}
330
331
}  // namespace docdb
332
}  // namespace yb