YugabyteDB (2.13.1.0-b60, 21121d69985fbf76aa6958d8f04a9bfa936293b5)

Coverage Report

Created: 2022-03-22 16:43

/Users/deen/code/yugabyte-db/src/yb/docdb/shared_lock_manager.cc
// Copyright (c) YugaByte, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License.  You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied.  See the License for the specific language governing permissions and limitations
// under the License.
//

#include "yb/docdb/shared_lock_manager.h"

#include <array>
#include <condition_variable>
#include <mutex>
#include <unordered_map>
#include <vector>

#include <boost/range/adaptor/reversed.hpp>
#include <glog/logging.h>

#include "yb/docdb/lock_batch.h"

#include "yb/util/enums.h"
#include "yb/util/ref_cnt_buffer.h"
#include "yb/util/scope_exit.h"
#include "yb/util/trace.h"

using std::string;

namespace yb {
namespace docdb {

namespace {

// Lock state stores the number of locks acquired for each intent type.
// The count for each intent type resides in a sequential block of bits in the lock state.
// For example, the count of locks of a particular intent type can be obtained as:
// (lock_state >> kIntentTypeShift[type]) & kSingleIntentMask.

// We have 128 bits in LockState and 6 types of intents, so 21 bits is the maximum number of bits
// that we could reserve for the block of a single intent type.
const size_t kIntentTypeBits = 16;
const LockState kSingleIntentMask = (static_cast<LockState>(1) << kIntentTypeBits) - 1;
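
// With kIntentTypeBits = 16, the block for intent type t occupies bits
// [16 * t, 16 * t + 16) of the lock state, so its current count can be read back as
// (lock_state >> (to_underlying(t) * kIntentTypeBits)) & kSingleIntentMask,
// which is exactly the block isolated by IntentTypeMask() below.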

bool IntentTypesConflict(IntentType lhs, IntentType rhs) {
  auto lhs_value = to_underlying(lhs);
  auto rhs_value = to_underlying(rhs);
  // The rules are the following:
  // 1) At least one of the intents must be strong for a conflict.
  // 2) Read and write intents conflict only with the opposite kind.
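  // For example, a strong write conflicts with both weak and strong reads, a weak read
  // conflicts only with a strong write, and two intents of the same kind (both reads or
  // both writes) never conflict at this level.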
  return ((lhs_value & kStrongIntentFlag) || (rhs_value & kStrongIntentFlag)) &&
         ((lhs_value & kWriteIntentFlag) != (rhs_value & kWriteIntentFlag));
}

LockState IntentTypeMask(IntentType intent_type) {
  return kSingleIntentMask << (to_underlying(intent_type) * kIntentTypeBits);
}

// Generate a conflict mask for every possible intent type set.
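// The result is indexed by the integer representation of IntentTypeSet: result[idx] has the
// whole block set for every intent type that conflicts with at least one intent in set idx,
// so checking for conflicts against a lock state reduces to a single bitwise AND.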
std::array<LockState, kIntentTypeSetMapSize> GenerateConflicts() {
  std::array<LockState, kIntentTypeSetMapSize> result;
  for (size_t idx = 0; idx != kIntentTypeSetMapSize; ++idx) {
    result[idx] = 0;
    for (auto intent_type : IntentTypeSet(idx)) {
      for (auto other_intent_type : kIntentTypeList) {
        if (IntentTypesConflict(intent_type, other_intent_type)) {
          result[idx] |= IntentTypeMask(other_intent_type);
        }
      }
    }
  }
  return result;
}

// Generate an array for every possible intent type set.
// Each entry is the combination of single_intent_mask shifted into the blocks of the intents
// from the set.
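// In other words, each entry covers exactly the counter blocks of the intent types present in
// the set, with single_intent_mask replicated into each of those blocks.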
std::array<LockState, kIntentTypeSetMapSize> GenerateByMask(LockState single_intent_mask) {
  DCHECK_EQ(single_intent_mask & kSingleIntentMask, single_intent_mask);
  std::array<LockState, kIntentTypeSetMapSize> result;
  for (size_t idx = 0; idx != kIntentTypeSetMapSize; ++idx) {
    result[idx] = 0;
    for (auto intent_type : IntentTypeSet(idx)) {
      result[idx] |= single_intent_mask << (to_underlying(intent_type) * kIntentTypeBits);
    }
  }
  return result;
}
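
// For each intent type set, the delta added to (and subtracted from) a LockedBatchEntry's
// num_holding counter when that set is acquired or released: the value 1 in each counter
// block of the set.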
const std::array<LockState, kIntentTypeSetMapSize> kIntentTypeSetAdd = GenerateByMask(1);

} // namespace
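
// Returns true if any intent type in lhs conflicts with any intent type in rhs.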
bool IntentTypeSetsConflict(IntentTypeSet lhs, IntentTypeSet rhs) {
  for (auto intent1 : lhs) {
    for (auto intent2 : rhs) {
      if (IntentTypesConflict(intent1, intent2)) {
        return true;
      }
    }
  }
  return false;
}
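
// Lock state for a single key. Entries are shared by all lock batches that currently
// reference the same key and are recycled through Impl::free_lock_entries_ once unused.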
struct LockedBatchEntry {
  // Taken only for short duration, with no blocking wait.
  mutable std::mutex mutex;

  std::condition_variable cond_var;

  // Refcounting for garbage collection. Can only be used while the global mutex is locked.
  // The global mutex resides in the lock manager and is the same for all LockBatchEntries.
  size_t ref_count = 0;

  // Number of holders for each intent type, packed into one LockState.
  std::atomic<LockState> num_holding{0};

  std::atomic<size_t> num_waiters{0};

  MUST_USE_RESULT bool Lock(IntentTypeSet lock, CoarseTimePoint deadline);

  void Unlock(IntentTypeSet lock);

  std::string ToString() const {
    std::lock_guard<std::mutex> lock(mutex);
    return Format("{ ref_count: $0 num_holding: $1 num_waiters: $2 }",
                  ref_count, num_holding.load(std::memory_order_acquire),
                  num_waiters.load(std::memory_order_acquire));
  }
};
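
// Implementation of the shared lock manager. Maps each key to a LockedBatchEntry under a
// short-lived global mutex and recycles entries through a free list to avoid reallocation.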
class SharedLockManager::Impl {
 public:
  MUST_USE_RESULT bool Lock(LockBatchEntries* key_to_intent_type, CoarseTimePoint deadline);
  void Unlock(const LockBatchEntries& key_to_intent_type);

  ~Impl() {
    std::lock_guard<std::mutex> lock(global_mutex_);
    LOG_IF(DFATAL, !locks_.empty()) << "Locks not empty in dtor: " << yb::ToString(locks_);
  }

 private:
  typedef std::unordered_map<RefCntPrefix, LockedBatchEntry*, RefCntPrefixHash> LockEntryMap;

  // Make sure the entries exist in the locks_ map and store pointers to them in the batch,
  // so we can access them without holding the global lock. The pointers are filled in the
  // same order as the keys in the batch.
  void Reserve(LockBatchEntries* batch);

  // Update refcounts and maybe collect garbage.
  void Cleanup(const LockBatchEntries& key_to_intent_type);

  // The global mutex should be taken only for very short duration, with no blocking wait.
  std::mutex global_mutex_;

  LockEntryMap locks_ GUARDED_BY(global_mutex_);
  // Cache of lock entries, to avoid allocation/deallocation of heavy LockedBatchEntry.
  std::vector<std::unique_ptr<LockedBatchEntry>> lock_entries_ GUARDED_BY(global_mutex_);
  std::vector<LockedBatchEntry*> free_lock_entries_ GUARDED_BY(global_mutex_);
};
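
// Per intent type set: the combined mask of all counter blocks in the set, and the mask of
// all blocks that conflict with the set (see GenerateConflicts above).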
const std::array<LockState, kIntentTypeSetMapSize> kIntentTypeSetMask = GenerateByMask(
    kSingleIntentMask);

const std::array<LockState, kIntentTypeSetMapSize> kIntentTypeSetConflicts = GenerateConflicts();

std::string SharedLockManager::ToString(const LockState& state) {
  std::string result = "{";
  bool first = true;
  for (auto type : kIntentTypeList) {
    if ((state & IntentTypeMask(type)) != 0) {
      if (first) {
        first = false;
      } else {
        result += ", ";
      }
      result += docdb::ToString(type);
    }
  }
  result += "}";
  return result;
}
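
// Tries to add the counters for lock_type to num_holding with a CAS loop. If a conflicting
// intent is currently held, registers as a waiter and blocks on cond_var until the state
// changes or the deadline expires; returns false only on deadline expiry.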
bool LockedBatchEntry::Lock(IntentTypeSet lock_type, CoarseTimePoint deadline) {
  size_t type_idx = lock_type.ToUIntPtr();
  auto& num_holding = this->num_holding;
  auto old_value = num_holding.load(std::memory_order_acquire);
  auto add = kIntentTypeSetAdd[type_idx];
  for (;;) {
    if ((old_value & kIntentTypeSetConflicts[type_idx]) == 0) {
      auto new_value = old_value + add;
      if (num_holding.compare_exchange_weak(old_value, new_value, std::memory_order_acq_rel)) {
        return true;
      }
      continue;
    }
    num_waiters.fetch_add(1, std::memory_order_release);
    auto se = ScopeExit([this] {
      num_waiters.fetch_sub(1, std::memory_order_release);
    });
    std::unique_lock<std::mutex> lock(mutex);
    old_value = num_holding.load(std::memory_order_acquire);
    if ((old_value & kIntentTypeSetConflicts[type_idx]) != 0) {
      if (deadline != CoarseTimePoint::max()) {
        if (cond_var.wait_until(lock, deadline) == std::cv_status::timeout) {
          return false;
        }
      } else {
        cond_var.wait(lock);
      }
    }
  }
}
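
// Atomically subtracts the counters for lock_types. If there are waiters and at least one
// of the released counters dropped to zero, wakes all waiters so they can retry.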
void LockedBatchEntry::Unlock(IntentTypeSet lock_types) {
  size_t type_idx = lock_types.ToUIntPtr();
  auto sub = kIntentTypeSetAdd[type_idx];

  // Have to emulate fetch_sub here, because GCC 5.5 doesn't have it for int128.
  auto old_state = num_holding.load(std::memory_order_acquire);
  LockState new_state;
  for (;;) {
    new_state = old_state - sub;
    if (num_holding.compare_exchange_weak(old_state, new_state, std::memory_order_acq_rel)) {
      break;
    }
  }

  if (!num_waiters.load(std::memory_order_acquire)) {
    return;
  }

  bool has_zero = false;
  for (auto intent_type : lock_types) {
    if (!(new_state & IntentTypeMask(intent_type))) {
      has_zero = true;
      break;
    }
  }

  // At least one of the counters should become 0 to unblock waiting locks.
  if (!has_zero) {
    return;
  }

  {
    // Lock/unlock the mutex as a barrier for Lock,
    // so we don't unlock and notify between the check and the wait in Lock.
    std::lock_guard<std::mutex> lock(mutex);
  }

  cond_var.notify_all();
}
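
// Locks every key in the batch in order. If any key cannot be locked before the deadline,
// releases the locks already taken (in reverse order), drops the reserved entries and
// returns false.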
bool SharedLockManager::Impl::Lock(LockBatchEntries* key_to_intent_type, CoarseTimePoint deadline) {
  TRACE("Locking a batch of $0 keys", key_to_intent_type->size());
  Reserve(key_to_intent_type);
  for (auto it = key_to_intent_type->begin(); it != key_to_intent_type->end(); ++it) {
    const auto& key_and_intent_type = *it;
    const auto intent_types = key_and_intent_type.intent_types;
    VLOG(4) << "Locking " << yb::ToString(intent_types) << ": "
            << key_and_intent_type.key.as_slice().ToDebugHexString();
    if (!key_and_intent_type.locked->Lock(intent_types, deadline)) {
      while (it != key_to_intent_type->begin()) {
        --it;
        it->locked->Unlock(it->intent_types);
      }
      Cleanup(*key_to_intent_type);
      return false;
    }
  }
  TRACE("Acquired a lock batch of $0 keys", key_to_intent_type->size());

  return true;
}
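
// For each key in the batch, creates or reuses a LockedBatchEntry under global_mutex_,
// bumps its ref_count and stores the pointer in the batch entry.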
void SharedLockManager::Impl::Reserve(LockBatchEntries* key_to_intent_type) {
  std::lock_guard<std::mutex> lock(global_mutex_);
  for (auto& key_and_intent_type : *key_to_intent_type) {
    auto& value = locks_[key_and_intent_type.key];
    if (!value) {
      if (!free_lock_entries_.empty()) {
        value = free_lock_entries_.back();
        free_lock_entries_.pop_back();
      } else {
        lock_entries_.emplace_back(std::make_unique<LockedBatchEntry>());
        value = lock_entries_.back().get();
      }
    }
    value->ref_count++;
    key_and_intent_type.locked = value;
  }
}

void SharedLockManager::Impl::Unlock(const LockBatchEntries& key_to_intent_type) {
  TRACE("Unlocking a batch of $0 keys", key_to_intent_type.size());

  for (const auto& key_and_intent_type : boost::adaptors::reverse(key_to_intent_type)) {
    key_and_intent_type.locked->Unlock(key_and_intent_type.intent_types);
  }

  Cleanup(key_to_intent_type);
}
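
// Drops one reference per key under global_mutex_; entries whose ref_count reaches zero are
// removed from locks_ and returned to free_lock_entries_ for reuse.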
void SharedLockManager::Impl::Cleanup(const LockBatchEntries& key_to_intent_type) {
  std::lock_guard<std::mutex> lock(global_mutex_);
  for (const auto& item : key_to_intent_type) {
    if (--(item.locked->ref_count) == 0) {
      locks_.erase(item.key);
      free_lock_entries_.push_back(item.locked);
    }
  }
}

SharedLockManager::SharedLockManager() : impl_(new Impl) {
}

SharedLockManager::~SharedLockManager() {}

bool SharedLockManager::Lock(LockBatchEntries* key_to_intent_type, CoarseTimePoint deadline) {
  return impl_->Lock(key_to_intent_type, deadline);
}

void SharedLockManager::Unlock(const LockBatchEntries& key_to_intent_type) {
  impl_->Unlock(key_to_intent_type);
}

}  // namespace docdb
}  // namespace yb