YugabyteDB (2.13.0.0-b42, bfc6a6643e7399ac8a0e81d06a3ee6d6571b33ab)

Coverage Report

Created: 2022-03-09 17:30

/Users/deen/code/yugabyte-db/src/yb/util/memory/memory.h
// Copyright 2010 Google Inc.  All Rights Reserved
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
//
// The following only applies to changes made to this file as part of YugaByte development.
//
// Portions Copyright (c) YugaByte, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License.  You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied.  See the License for the specific language governing permissions and limitations
// under the License.
//
//
// Classes for memory management, used by materializations
// (arenas, segments, and STL collections parametrized via arena allocators)
// so that memory usage can be controlled at the application level.
//
// Materializations can be parametrized by specifying an instance of a
// BufferAllocator. The allocator implements the memory management policy
// (e.g. setting allocation limits). Allocators may be shared between
// multiple materializations; e.g. you can designate a single allocator per
// user request, thus setting bounds on memory usage on a per-request basis.
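//
// For illustration, a per-request setup might look like the following sketch
// (hypothetical application code; MemoryLimit and HeapBufferAllocator are
// defined later in this file):
//
//   // One allocator per user request, bounding the request to 32 MB.
//   MemoryLimit request_allocator(32 << 20, HeapBufferAllocator::Get());
//   Buffer block = request_allocator.Allocate(1 << 20);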

#ifndef YB_UTIL_MEMORY_MEMORY_H_
#define YB_UTIL_MEMORY_MEMORY_H_

#include <stddef.h>

#include <algorithm>
#include <limits>
#include <memory>
#include <vector>

#include <boost/mpl/if.hpp>
#include <boost/type_traits/is_const.hpp>
#include <glog/logging.h>

#include "yb/gutil/logging-inl.h"
#include "yb/gutil/macros.h"
#include "yb/gutil/singleton.h"
#include "yb/gutil/strings/stringpiece.h"

#include "yb/util/boost_mutex_utils.h"
#include "yb/util/mutex.h"

using std::copy;
using std::max;
using std::min;
using std::numeric_limits;
using std::reverse;
using std::sort;
using std::swap;
using std::vector;

namespace yb {

class BufferAllocator;
class MemTracker;

void OverwriteWithPattern(char* p, size_t len, GStringPiece pattern);

// Wrapper for a block of data allocated by a BufferAllocator. Owns the block.
// (To release the block, destroy the buffer - it will then return it via the
// same allocator that has been used to create it).
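//
// Example (illustrative; assumes some BufferAllocator* 'allocator'):
//
//   Buffer buffer = allocator->Allocate(1024);
//   if (!buffer) {
//     // Allocation failed.
//   } else {
//     memset(buffer.data(), 0, buffer.size());
//   }
//   // When 'buffer' goes out of scope, the block goes back to 'allocator'.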
class Buffer {
 public:
  Buffer() : data_(nullptr), size_(0), allocator_(nullptr) {}

  Buffer(void* data, size_t size, BufferAllocator* allocator)
      : data_(CHECK_NOTNULL(data)),
        size_(size),
        allocator_(allocator) {
#ifndef NDEBUG
    OverwriteWithPattern(reinterpret_cast<char*>(data_), size_,
                         "NEWNEWNEWNEWNEWNEWNEWNEWNEWNEWNEWNEW"
                         "NEWNEWNEWNEWNEWNEWNEWNEWNEWNEWNEWNEW"
                         "NEWNEWNEWNEWNEWNEWNEWNEWNEWNEWNEWNEW");
#endif
  }

  Buffer(Buffer&& rhs)
      : data_(rhs.data_), size_(rhs.size_), allocator_(rhs.allocator_) {
    rhs.Release();
  }

  Buffer(const Buffer& rhs) = delete;
  void operator=(const Buffer& rhs) = delete;

  ~Buffer();

  void* data() const { return data_; }   // The data buffer.
  uint8_t* udata() const { return static_cast<uint8_t*>(data_); }
  uint8_t* end() const { return udata() + size_; }
  size_t size() const { return size_; }  // In bytes.

  void Release() {
    data_ = nullptr;
    allocator_ = nullptr;
  }

  explicit operator bool() const {
    return data_ != nullptr;
  }

  bool operator!() const {
    return data_ == nullptr;
  }

 private:
  friend class BufferAllocator;

  // Called by a successful realloc.
  void Update(void* new_data, size_t new_size) {
#ifndef NDEBUG
    if (new_size > size_) {
      OverwriteWithPattern(reinterpret_cast<char*>(new_data) + size_,
                           new_size - size_, "NEW");
    }
#endif
    data_ = new_data;
    size_ = new_size;
  }

  void* data_;
  size_t size_;
  BufferAllocator* allocator_;
};

// Allocators allow applications to control memory usage. They are
// used by materializations to allocate blocks of memory for arenas.
// BufferAllocator is an abstract class that defines a common contract of
// all implementations of allocators. Specific allocators provide specific
// features, e.g. enforced resource limits, thread safety, etc.
class BufferAllocator {
 public:
  virtual ~BufferAllocator() {}

  // Called by the user when a new block of memory is needed. The 'requested'
  // parameter specifies how much memory (in bytes) the user would like to get.
  // The 'minimal' parameter specifies how much the caller is willing to settle
  // for. The allocator returns a buffer sized in the range
  // [minimal, requested], or a null buffer if the request can't be satisfied.
  // When the buffer is destroyed, its destructor calls the FreeInternal()
  // method on its allocator.
  // CAVEAT: The allocator must outlive all buffers returned by it.
  //
  // Corner cases:
  // 1. If requested == 0, the allocator will always return a valid Buffer
  //    with a non-NULL data pointer and zero capacity.
  // 2. If minimal == 0, the allocator will always return a valid Buffer
  //    with a non-NULL data pointer, possibly with zero capacity.
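  //
  // Example (sketch): request 1 MB, but settle for as little as 64 KB:
  //
  //   Buffer buffer = allocator->BestEffortAllocate(1 << 20, 64 << 10);
  //   if (!buffer) {
  //     // Not even 64 KB was available.
  //   }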
  Buffer BestEffortAllocate(size_t requested, size_t minimal) {
    DCHECK_LE(minimal, requested);
    Buffer result = AllocateInternal(requested, minimal, this);
    LogAllocation(requested, minimal, result);
    return result;
  }

  // Called by the user when a new block of memory is needed. Equivalent to
  // BestEffortAllocate(requested, requested).
  Buffer Allocate(size_t requested) {
    return BestEffortAllocate(requested, requested);
  }

  // Returns the amount of memory (in bytes) still available for this allocator.
  // For unbounded allocators (like raw HeapBufferAllocator) this is the highest
  // size_t value possible.
  // TODO(user): consider making pure virtual.
  virtual size_t Available() const { return numeric_limits<size_t>::max(); }

 protected:
  friend class Buffer;

  BufferAllocator() {}

  // Expose the constructor to subclasses of BufferAllocator.
  Buffer CreateBuffer(void* data, size_t size, BufferAllocator* allocator) {
    return Buffer(data, size, allocator);
  }

  // Expose Buffer::Update to subclasses of BufferAllocator.
  void UpdateBuffer(void* new_data, size_t new_size, Buffer* buffer) {
    buffer->Update(new_data, new_size);
  }

  // Called by chained buffer allocators.
  Buffer DelegateAllocate(BufferAllocator* delegate,
                          size_t requested,
                          size_t minimal,
                          BufferAllocator* originator) {
    return delegate->AllocateInternal(requested, minimal, originator);
  }

  // Called by chained buffer allocators.
  bool DelegateReallocate(BufferAllocator* delegate,
                          size_t requested,
                          size_t minimal,
                          Buffer* buffer,
                          BufferAllocator* originator) {
    return delegate->ReallocateInternal(requested, minimal, buffer, originator);
  }

  // Called by chained buffer allocators.
  void DelegateFree(BufferAllocator* delegate, Buffer* buffer) {
    delegate->FreeInternal(buffer);
  }

 private:
  // Implemented by concrete subclasses.
  virtual Buffer AllocateInternal(size_t requested,
                                  size_t minimal,
                                  BufferAllocator* originator) = 0;

  // Implemented by concrete subclasses. Returns false on failure.
  virtual bool ReallocateInternal(size_t requested,
                                  size_t minimal,
                                  Buffer* buffer,
                                  BufferAllocator* originator) = 0;

  // Implemented by concrete subclasses.
  virtual void FreeInternal(Buffer* buffer) = 0;

  // Logs a warning message if the allocation failed or if it returned less than
  // the required number of bytes.
  void LogAllocation(size_t required, size_t minimal, const Buffer& buffer);

  DISALLOW_COPY_AND_ASSIGN(BufferAllocator);
};

// Allocates buffers on the heap, with no memory limits. Uses standard C
// allocation functions (malloc, realloc, free).
class HeapBufferAllocator : public BufferAllocator {
 public:
  virtual ~HeapBufferAllocator() {}

  // Returns a singleton instance of the heap allocator.
  static HeapBufferAllocator* Get() {
    return Singleton<HeapBufferAllocator>::get();
  }

  virtual size_t Available() const override {
    return numeric_limits<size_t>::max();
  }

 private:
  // When true, allocates memory aligned to a 16-byte boundary.
  // Use it if you want to speed up SIMD operations on the memory area.
  const bool aligned_mode_;

  friend class Singleton<HeapBufferAllocator>;

  // Always allocates a 'requested'-sized buffer, or returns NULL on OOM.
  virtual Buffer AllocateInternal(size_t requested,
                                  size_t minimal,
                                  BufferAllocator* originator) override;

  virtual bool ReallocateInternal(size_t requested,
                                  size_t minimal,
                                  Buffer* buffer,
                                  BufferAllocator* originator) override;

  void* Malloc(size_t size);
  void* Realloc(void* previousData, size_t previousSize, size_t newSize);

  virtual void FreeInternal(Buffer* buffer) override;

  HeapBufferAllocator();
  explicit HeapBufferAllocator(bool aligned_mode)
      : aligned_mode_(aligned_mode) {}

  DISALLOW_COPY_AND_ASSIGN(HeapBufferAllocator);
};

// Wrapper around the delegate allocator that clears all newly allocated
// (and reallocated) memory.
class ClearingBufferAllocator : public BufferAllocator {
 public:
  // Does not take ownership of the delegate.
  explicit ClearingBufferAllocator(BufferAllocator* delegate)
      : delegate_(delegate) {}

  virtual size_t Available() const override {
    return delegate_->Available();
  }

 private:
  virtual Buffer AllocateInternal(size_t requested,
                                  size_t minimal,
                                  BufferAllocator* originator) override;

  virtual bool ReallocateInternal(size_t requested,
                                  size_t minimal,
                                  Buffer* buffer,
                                  BufferAllocator* originator) override;

  virtual void FreeInternal(Buffer* buffer) override;

  BufferAllocator* delegate_;
  DISALLOW_COPY_AND_ASSIGN(ClearingBufferAllocator);
};

// Abstract policy for modifying allocation requests - e.g. enforcing quotas.
class Mediator {
 public:
  Mediator() {}
  virtual ~Mediator() {}

  // Called by an allocator when an allocation request is processed.
  // Must return a value in the range [minimal, requested], or zero. Returning
  // zero (if minimal is non-zero) indicates denial to allocate. Returning
  // non-zero indicates that the request should be capped at that value.
  virtual size_t Allocate(size_t requested, size_t minimal) = 0;

  // Called by an allocator when the specified amount (in bytes) is released.
  virtual void Free(size_t amount) = 0;

  // TODO(user): consider making pure virtual.
  virtual size_t Available() const { return numeric_limits<size_t>::max(); }
};
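
// A minimal Mediator sketch (illustrative only; not part of this header):
// a hypothetical mediator that caps total grants at a fixed budget, much
// like the Quota classes below.
//
//   class BudgetMediator : public Mediator {
//    public:
//     explicit BudgetMediator(size_t budget) : remaining_(budget) {}
//     size_t Allocate(size_t requested, size_t minimal) override {
//       if (minimal > remaining_) return 0;            // Deny the request.
//       size_t granted = min(requested, remaining_);   // Cap the grant.
//       remaining_ -= granted;
//       return granted;
//     }
//     void Free(size_t amount) override { remaining_ += amount; }
//     size_t Available() const override { return remaining_; }
//    private:
//     size_t remaining_;
//   };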

// Optionally thread-safe skeletal implementation of a 'quota' abstraction,
// providing methods to allocate resources against the quota, and return them.
template<bool thread_safe>
class Quota : public Mediator {
 public:
  explicit Quota(bool enforced) : usage_(0), enforced_(enforced) {}
  virtual ~Quota() {}

  // Returns a value in range [minimal, requested] if not exceeding remaining
  // quota or if the quota is not enforced (soft quota), and adjusts the usage
  // value accordingly.  Otherwise, returns zero. The semantics of 'remaining
  // quota' are defined by subclasses (that must supply the GetQuotaInternal()
  // method).
  virtual size_t Allocate(size_t requested, size_t minimal) override;

  virtual void Free(size_t amount) override;

  // Returns memory still available in the quota. For unenforced Quota objects,
  // you are still able to perform _minimal_ allocations when the available
  // quota is 0 (or less than the "minimal" param).
  virtual size_t Available() const override {
    lock_guard_maybe<Mutex> lock(Quota<thread_safe>::mutex());
    const size_t quota = GetQuotaInternal();
    return (usage_ >= quota) ? 0 : (quota - usage_);
  }

  // Returns the current quota value.
  size_t GetQuota() const;

  // Returns the current usage value, defined as a sum of all the values
  // granted by calls to Allocate, less those released via calls to Free.
  size_t GetUsage() const;

  bool enforced() const {
    return enforced_;
  }

 protected:
  // Overridden by specific implementations, to define semantics of
  // the quota, i.e. the total amount of resources that the mediator will
  // allocate. Called directly from GetQuota that optionally provides
  // thread safety. An 'Allocate' request will succeed if
  // GetUsage() + minimal <= GetQuota() or if the quota is not enforced (soft
  // quota).
  virtual size_t GetQuotaInternal() const = 0;

  Mutex* mutex() const { return thread_safe ? &mutex_ : NULL; }

 private:
  mutable Mutex mutex_;
  size_t usage_;
  bool enforced_;
  DISALLOW_COPY_AND_ASSIGN(Quota);
};

// Optionally thread-safe static quota implementation (where quota is explicitly
// set to a concrete numeric value).
template<bool thread_safe>
class StaticQuota : public Quota<thread_safe> {
 public:
  explicit StaticQuota(size_t quota)
      : Quota<thread_safe>(true) {
    SetQuota(quota);
  }
  StaticQuota(size_t quota, bool enforced)
      : Quota<thread_safe>(enforced) {
    SetQuota(quota);
  }
  virtual ~StaticQuota() {}

  // Sets quota to the new value.
  void SetQuota(const size_t quota);

 protected:
  virtual size_t GetQuotaInternal() const { return quota_; }

 private:
  size_t quota_;
  DISALLOW_COPY_AND_ASSIGN(StaticQuota);
};

// Places resource limits on another allocator, using the specified Mediator
// (e.g. quota) implementation.
//
// If the mediator and the delegate allocator are thread-safe, this allocator
// is also thread-safe, to the extent that it will not introduce any
// state inconsistencies. However, without additional synchronization,
// allocation requests are not atomic end-to-end. This way, it is deadlock-
// resilient (even if you have cyclic relationships between allocators) and
// allows better concurrency. But, it may cause over-conservative
// allocations under memory contention, if you have multiple levels of
// mediating allocators. For example, if two requests that can't both be
// satisfied are submitted concurrently, it may happen that one of them
// succeeds but gets a smaller buffer allocated than it would if the requests
// were strictly ordered. This is usually not a problem, however, as you
// don't really want to operate so close to memory limits that some of your
// allocations can't be satisfied. If you do have a simple, cascading graph
// of allocators though, and want to force requests to be atomic end-to-end,
// put a ThreadSafeBufferAllocator at the entry point.
class MediatingBufferAllocator : public BufferAllocator {
 public:
  // Does not take ownership of the delegate, nor the mediator, allowing
  // both to be reused.
  MediatingBufferAllocator(BufferAllocator* const delegate,
                           Mediator* const mediator)
      : delegate_(delegate),
        mediator_(mediator) {}

  virtual ~MediatingBufferAllocator() {}

  virtual size_t Available() const override {
    return min(delegate_->Available(), mediator_->Available());
  }

 private:
  virtual Buffer AllocateInternal(size_t requested,
                                  size_t minimal,
                                  BufferAllocator* originator) override;

  virtual bool ReallocateInternal(size_t requested,
                                  size_t minimal,
                                  Buffer* buffer,
                                  BufferAllocator* originator) override;

  virtual void FreeInternal(Buffer* buffer) override;

  BufferAllocator* delegate_;
  Mediator* const mediator_;
};
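
// Example wiring (illustrative): two allocators drawing on one shared quota.
//
//   StaticQuota<true> quota(1 << 20);  // 1 MB, enforced, thread-safe.
//   MediatingBufferAllocator alloc_a(HeapBufferAllocator::Get(), &quota);
//   MediatingBufferAllocator alloc_b(HeapBufferAllocator::Get(), &quota);
//   // Allocations through alloc_a and alloc_b jointly respect 'quota'.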

// Convenience non-thread-safe static memory bounds enforcer.
// Combines MediatingBufferAllocator with a StaticQuota.
class MemoryLimit : public BufferAllocator {
 public:
  // Creates a limiter based on the default, heap allocator. Quota is infinite.
  // (Can be set using SetQuota).
  MemoryLimit()
      : quota_(std::numeric_limits<size_t>::max()),
        allocator_(HeapBufferAllocator::Get(), &quota_) {}

  // Creates a limiter based on the default, heap allocator.
  explicit MemoryLimit(size_t quota)
      : quota_(quota),
        allocator_(HeapBufferAllocator::Get(), &quota_) {}

  // Creates a limiter relaying to the specified delegate allocator.
  MemoryLimit(size_t quota, BufferAllocator* const delegate)
      : quota_(quota),
        allocator_(delegate, &quota_) {}

  // Creates a (possibly non-enforcing) limiter relaying to the specified
  // delegate allocator.
  MemoryLimit(size_t quota, bool enforced, BufferAllocator* const delegate)
      : quota_(quota, enforced),
        allocator_(delegate, &quota_) {}

  virtual ~MemoryLimit() {}

  virtual size_t Available() const override {
    return allocator_.Available();
  }

  size_t GetQuota() const { return quota_.GetQuota(); }
  size_t GetUsage() const { return quota_.GetUsage(); }
  void SetQuota(const size_t quota) { quota_.SetQuota(quota); }

 private:
  virtual Buffer AllocateInternal(size_t requested,
                                  size_t minimal,
                                  BufferAllocator* originator) override {
    return DelegateAllocate(&allocator_, requested, minimal, originator);
  }
  virtual bool ReallocateInternal(size_t requested,
                                  size_t minimal,
                                  Buffer* buffer,
                                  BufferAllocator* originator) override {
    return DelegateReallocate(&allocator_, requested, minimal, buffer,
                              originator);
  }
  virtual void FreeInternal(Buffer* buffer) override {
    DelegateFree(&allocator_, buffer);
  }

  StaticQuota<false> quota_;
  MediatingBufferAllocator allocator_;
};
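
// Example (illustrative): enforce a 16 MB bound over the shared heap
// allocator; the bound can be adjusted later via SetQuota().
//
//   MemoryLimit limit(16 << 20, HeapBufferAllocator::Get());
//   Buffer buffer = limit.BestEffortAllocate(1 << 20, 4 << 10);
//   // limit.GetUsage() reflects the granted size until 'buffer' is destroyed.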

// An allocator that allows bypassing the (potential) soft quota below for a
// given amount of memory usage. The goal is to make the allocation methods and
// Available() work as if the allocator below had at least bypassed_amount of
// soft quota. Of course, this class cannot exceed the hard quota.
class SoftQuotaBypassingBufferAllocator : public BufferAllocator {
 public:
  SoftQuotaBypassingBufferAllocator(BufferAllocator* allocator,
                                    size_t bypassed_amount)
      : allocator_(std::numeric_limits<size_t>::max(), allocator),
        bypassed_amount_(bypassed_amount) {}

  virtual size_t Available() const override {
    const size_t usage = allocator_.GetUsage();
    size_t available = allocator_.Available();
    if (bypassed_amount_ > usage) {
      available = max(bypassed_amount_ - usage, available);
    }
    return available;
  }

 private:
  // Calculates how much to increase the minimal parameter to allocate more
  // aggressively in the underlying allocator. This is to avoid getting only
  // very small allocations when we exceed the soft quota below. The request
  // with increased minimal size is more likely to fail because of exceeding
  // hard quota, so we also fall back to the original minimal size.
  size_t AdjustMinimal(size_t requested, size_t minimal) const {
    return min(requested, max(minimal, Available()));
  }

  virtual Buffer AllocateInternal(size_t requested,
                                  size_t minimal,
                                  BufferAllocator* originator) override {
    // Try increasing the "minimal" parameter to allocate more aggressively
    // within the bypassed amount of soft quota.
    Buffer result = DelegateAllocate(&allocator_,
                                     requested,
                                     AdjustMinimal(requested, minimal),
                                     originator);
    if (result) {
      return result;
    } else {
      return DelegateAllocate(&allocator_, requested, minimal, originator);
    }
  }

  virtual bool ReallocateInternal(size_t requested,
                                  size_t minimal,
                                  Buffer* buffer,
                                  BufferAllocator* originator) override {
    size_t adjusted_minimal = AdjustMinimal(requested, minimal);
    if (DelegateReallocate(&allocator_, requested, adjusted_minimal, buffer, originator)) {
      return true;
    } else {
      return DelegateReallocate(&allocator_, requested, minimal, buffer, originator);
    }
  }

  virtual void FreeInternal(Buffer* buffer) override {
    DelegateFree(&allocator_, buffer);
  }

  // Using MemoryLimit with an "infinite" limit to get GetUsage().
  MemoryLimit allocator_;
  size_t bypassed_amount_;
};

// An interface for a MemoryStatisticsCollector - an object which collects
// information about the memory usage of the allocator. The collector will
// gather statistics about memory usage based on information received from the
// allocator.
class MemoryStatisticsCollectorInterface {
 public:
  MemoryStatisticsCollectorInterface() {}

  virtual ~MemoryStatisticsCollectorInterface() {}

  // Informs the collector that the allocator granted 'bytes' of memory. Note
  // that in the case of reallocation, 'bytes' should be the increase in total
  // memory usage, not the total size of the buffer after reallocation.
  virtual void AllocatedMemoryBytes(size_t bytes) = 0;

  // Informs the collector that the allocator received a request for at least
  // 'bytes' of memory, and rejected it (meaning that it granted nothing).
  virtual void RefusedMemoryBytes(size_t bytes) = 0;

  // Informs the collector that 'bytes' of memory have been released to the
  // allocator.
  virtual void FreedMemoryBytes(size_t bytes) = 0;

 private:
  DISALLOW_COPY_AND_ASSIGN(MemoryStatisticsCollectorInterface);
};
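
// A minimal collector sketch (illustrative only): tracks peak granted memory.
//
//   class PeakUsageCollector : public MemoryStatisticsCollectorInterface {
//    public:
//     void AllocatedMemoryBytes(size_t bytes) override {
//       current_ += bytes;
//       peak_ = max(peak_, current_);
//     }
//     void RefusedMemoryBytes(size_t bytes) override {}
//     void FreedMemoryBytes(size_t bytes) override { current_ -= bytes; }
//     size_t peak() const { return peak_; }
//    private:
//     size_t current_ = 0;
//     size_t peak_ = 0;
//   };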

class MemoryStatisticsCollectingBufferAllocator : public BufferAllocator {
 public:
  // Does not take ownership of the delegate.
  // Takes ownership of memory_stats_collector.
  MemoryStatisticsCollectingBufferAllocator(
      BufferAllocator* const delegate,
      MemoryStatisticsCollectorInterface* const memory_stats_collector)
      : delegate_(delegate),
        memory_stats_collector_(memory_stats_collector) {}

  virtual ~MemoryStatisticsCollectingBufferAllocator() {}

  virtual size_t Available() const override {
    return delegate_->Available();
  }

 private:
  virtual Buffer AllocateInternal(size_t requested,
                                  size_t minimal,
                                  BufferAllocator* originator) override;

  virtual bool ReallocateInternal(size_t requested,
                                  size_t minimal,
                                  Buffer* buffer,
                                  BufferAllocator* originator) override;

  virtual void FreeInternal(Buffer* buffer) override;

  BufferAllocator* delegate_;
  std::unique_ptr<MemoryStatisticsCollectorInterface>
      memory_stats_collector_;
};

// BufferAllocator which uses MemTracker to keep track of and optionally
// (if a limit is set on the MemTracker) regulate memory consumption.
class MemoryTrackingBufferAllocator : public BufferAllocator {
 public:
  // Does not take ownership of the delegate. The delegate must remain
  // valid for the lifetime of this allocator. Increments the reference
  // count for 'mem_tracker'.
  // If 'mem_tracker' has a limit and 'enforce_limit' is true, then
  // the classes calling this buffer allocator (whether directly, or
  // through an Arena) must be able to handle the case when allocation
  // fails. If 'enforce_limit' is false (this is the default), then
  // allocation will always succeed.
  MemoryTrackingBufferAllocator(BufferAllocator* const delegate,
                                std::shared_ptr<MemTracker> mem_tracker,
                                bool enforce_limit = false)
      : delegate_(delegate),
        mem_tracker_(std::move(mem_tracker)),
        enforce_limit_(enforce_limit) {}

  virtual ~MemoryTrackingBufferAllocator() {}

  // If 'enforce_limit_' is false, this always returns the maximum possible
  // value for int64_t (std::numeric_limits<int64_t>::max()). Otherwise, this
  // is equivalent to calling mem_tracker_->SpareCapacity().
  virtual size_t Available() const override;

 private:

  // If enforce_limit_ is true, this is equivalent to calling
  // mem_tracker_->TryConsume(bytes). If enforce_limit_ is false and
  // mem_tracker_->TryConsume(bytes) is false, we call
  // mem_tracker_->Consume(bytes) and always return true.
  bool TryConsume(int64_t bytes);

  virtual Buffer AllocateInternal(size_t requested,
                                  size_t minimal,
                                  BufferAllocator* originator) override;

  virtual bool ReallocateInternal(size_t requested,
                                  size_t minimal,
                                  Buffer* buffer,
                                  BufferAllocator* originator) override;

  virtual void FreeInternal(Buffer* buffer) override;

  BufferAllocator* delegate_;
  std::shared_ptr<MemTracker> mem_tracker_;
  bool enforce_limit_;
};
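
// Example wiring (illustrative; assumes a MemTracker instance obtained
// elsewhere):
//
//   std::shared_ptr<MemTracker> tracker = /* obtained elsewhere */;
//   MemoryTrackingBufferAllocator allocator(
//       HeapBufferAllocator::Get(), tracker, /* enforce_limit = */ true);
//   Buffer buffer = allocator.Allocate(1 << 20);  // May fail if over limit.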

// Synchronizes access to AllocateInternal and FreeInternal, and exposes the
// mutex for use by subclasses. Allocation requests performed through this
// allocator are atomic end-to-end. The template parameter
// DelegateAllocatorType allows specifying a subclass of BufferAllocator for
// the delegate, so that subclasses of ThreadSafeBufferAllocator can access
// additional methods provided by the allocator subclass. If this is not
// needed, it can be set to BufferAllocator.
template <class DelegateAllocatorType>
class ThreadSafeBufferAllocator : public BufferAllocator {
 public:
  // Does not take ownership of the delegate.
  explicit ThreadSafeBufferAllocator(DelegateAllocatorType* delegate)
      : delegate_(delegate) {}
  virtual ~ThreadSafeBufferAllocator() {}

  virtual size_t Available() const override {
    lock_guard_maybe<Mutex> lock(mutex());
    return delegate()->Available();
  }

 protected:
  Mutex* mutex() const { return &mutex_; }
  // Expose the delegate allocator, with the precise type of the allocator
  // specified by the template parameter. The delegate() methods themselves
  // don't give any thread-safety guarantees. Protect all uses by taking the
  // Mutex exposed by the mutex() method.
  DelegateAllocatorType* delegate() { return delegate_; }
  const DelegateAllocatorType* delegate() const { return delegate_; }

 private:
  virtual Buffer AllocateInternal(size_t requested,
                                  size_t minimal,
                                  BufferAllocator* originator) override {
    lock_guard_maybe<Mutex> lock(mutex());
    return DelegateAllocate(delegate(), requested, minimal, originator);
  }

  virtual bool ReallocateInternal(size_t requested,
                                  size_t minimal,
                                  Buffer* buffer,
                                  BufferAllocator* originator) override {
    lock_guard_maybe<Mutex> lock(mutex());
    return DelegateReallocate(delegate(), requested, minimal, buffer,
                              originator);
  }

  virtual void FreeInternal(Buffer* buffer) override {
    lock_guard_maybe<Mutex> lock(mutex());
    DelegateFree(delegate(), buffer);
  }

  DelegateAllocatorType* delegate_;
  mutable Mutex mutex_;
  DISALLOW_COPY_AND_ASSIGN(ThreadSafeBufferAllocator);
};

// A version of ThreadSafeBufferAllocator that owns the supplied delegate
// allocator.
template <class DelegateAllocatorType>
class OwningThreadSafeBufferAllocator
    : public ThreadSafeBufferAllocator<DelegateAllocatorType> {
 public:
  explicit OwningThreadSafeBufferAllocator(DelegateAllocatorType* delegate)
      : ThreadSafeBufferAllocator<DelegateAllocatorType>(delegate),
        delegate_owned_(delegate) {}
  virtual ~OwningThreadSafeBufferAllocator() {}

 private:
  std::unique_ptr<DelegateAllocatorType> delegate_owned_;
};

class ThreadSafeMemoryLimit
    : public OwningThreadSafeBufferAllocator<MemoryLimit> {
 public:
  ThreadSafeMemoryLimit(size_t quota, bool enforced,
                        BufferAllocator* const delegate)
      : OwningThreadSafeBufferAllocator<MemoryLimit>(
            new MemoryLimit(quota, enforced, delegate)) {}
  virtual ~ThreadSafeMemoryLimit() {}

  size_t GetQuota() const {
    lock_guard_maybe<Mutex> lock(mutex());
    return delegate()->GetQuota();
  }
  size_t GetUsage() const {
    lock_guard_maybe<Mutex> lock(mutex());
    return delegate()->GetUsage();
  }
  void SetQuota(const size_t quota) {
    lock_guard_maybe<Mutex> lock(mutex());
    delegate()->SetQuota(quota);
  }
};

// A BufferAllocator that can be given ownership of many objects of a given
// type. These objects will then be deleted when the buffer allocator is
// destroyed. The objects added last are deleted first (LIFO).
template <typename OwnedType>
class OwningBufferAllocator : public BufferAllocator {
 public:
  // Doesn't take ownership of delegate.
  explicit OwningBufferAllocator(BufferAllocator* const delegate)
      : delegate_(delegate) {}

  virtual ~OwningBufferAllocator() {
    // Delete elements starting from the end.
    while (!owned_.empty()) {
      OwnedType* p = owned_.back();
      owned_.pop_back();
      delete p;
    }
  }

  // Add to the collection of objects owned by this allocator. The object added
  // last is deleted first.
  OwningBufferAllocator* Add(OwnedType* p) {
    owned_.push_back(p);
    return this;
  }

  virtual size_t Available() const override {
    return delegate_->Available();
  }

 private:
  virtual Buffer AllocateInternal(size_t requested,
                                  size_t minimal,
                                  BufferAllocator* originator) override {
    return DelegateAllocate(delegate_, requested, minimal, originator);
  }

  virtual bool ReallocateInternal(size_t requested,
                                  size_t minimal,
                                  Buffer* buffer,
                                  BufferAllocator* originator) override {
    return DelegateReallocate(delegate_, requested, minimal, buffer,
                              originator);
  }

  virtual void FreeInternal(Buffer* buffer) override {
    DelegateFree(delegate_, buffer);
  }

  // Not using PointerVector here because we want to guarantee a certain order
  // of deleting elements (starting from the ones added last).
  vector<OwnedType*> owned_;
  BufferAllocator* delegate_;
};
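
// Example (illustrative): tie the lifetime of helper objects to the
// allocator; "second" is deleted before "first" (LIFO).
//
//   OwningBufferAllocator<std::string> owning(HeapBufferAllocator::Get());
//   owning.Add(new std::string("first"))
//         ->Add(new std::string("second"));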

// Buffer allocator that tries to guarantee an exact and consistent amount
// of memory. Uses a hard MemoryLimit to enforce the upper bound, but also
// guarantees consistent allocations by ignoring minimal requested amounts and
// always returning the full amount of memory requested, if available.
// Allocations will fail if the memory requested would exceed the quota or if
// the underlying allocator fails to provide the memory.
class GuaranteeMemory : public BufferAllocator {
 public:
  // Doesn't take ownership of 'delegate'.
  GuaranteeMemory(size_t memory_quota,
                  BufferAllocator* delegate)
      : limit_(memory_quota, true, delegate),
        memory_guarantee_(memory_quota) {}

  virtual size_t Available() const override {
    return memory_guarantee_ - limit_.GetUsage();
  }

 private:
  virtual Buffer AllocateInternal(size_t requested,
                                  size_t minimal,
                                  BufferAllocator* originator) override {
    if (requested > Available()) {
      return Buffer();
    } else {
      return DelegateAllocate(&limit_, requested, requested, originator);
    }
  }

  virtual bool ReallocateInternal(size_t requested,
                                  size_t minimal,
                                  Buffer* buffer,
                                  BufferAllocator* originator) override {
    int64 additional_memory = requested - (buffer != NULL ? buffer->size() : 0);
    return additional_memory <= static_cast<int64>(Available())
        && DelegateReallocate(&limit_, requested, requested,
                              buffer, originator);
  }

  virtual void FreeInternal(Buffer* buffer) override {
    DelegateFree(&limit_, buffer);
  }

  MemoryLimit limit_;
  size_t memory_guarantee_;
  DISALLOW_COPY_AND_ASSIGN(GuaranteeMemory);
};
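
// Example (illustrative): reserve exactly 8 MB; requests are all-or-nothing.
//
//   GuaranteeMemory guarantee(8 << 20, HeapBufferAllocator::Get());
//   Buffer buffer = guarantee.Allocate(4 << 20);  // Granted in full, or null.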

// Implementation of inline and template methods

template<bool thread_safe>
size_t Quota<thread_safe>::Allocate(const size_t requested,
                                    const size_t minimal) {
  lock_guard_maybe<Mutex> lock(mutex());
  DCHECK_LE(minimal, requested)
      << "\"minimal\" shouldn't be bigger than \"requested\"";
  const size_t quota = GetQuotaInternal();
  size_t allocation;
  if (usage_ > quota || minimal > quota - usage_) {
    // OOQ (Out of quota).
    if (!enforced() && minimal <= numeric_limits<size_t>::max() - usage_) {
      // The quota is unenforced and the value of "minimal" won't cause an
      // overflow. Perform a minimal allocation.
      allocation = minimal;
    } else {
      allocation = 0;
    }
    LOG(WARNING) << "Out of quota. Requested: " << requested
                 << " bytes, or at least minimal: " << minimal
                 << ". Current quota value is: " << quota
                 << " while current usage is: " << usage_
                 << ". The quota is " << (enforced() ? "" : "not ")
                 << "enforced. "
                 << ((allocation == 0) ? "Did not allocate any memory."
                 : "Allocated the minimal value requested.");
  } else {
    allocation = min(requested, quota - usage_);
  }
  usage_ += allocation;
  return allocation;
}

template<bool thread_safe>
void Quota<thread_safe>::Free(size_t amount) {
  lock_guard_maybe<Mutex> lock(mutex());
  usage_ -= amount;
  // This check can fire, for example, when multiple threads allocate/free
  // memory concurrently via the same Quota object that is not protected
  // with a mutex (thread_safe == false).
  if (usage_ > (numeric_limits<size_t>::max() - (1 << 28))) {
    LOG(ERROR) << "Suspiciously big usage_ value: " << usage_
               << " (could be a result of size_t wrapping around below 0, "
               << "for example as a result of a race condition).";
  }
}

template<bool thread_safe>
size_t Quota<thread_safe>::GetQuota() const {
  lock_guard_maybe<Mutex> lock(mutex());
  return GetQuotaInternal();
}

template<bool thread_safe>
size_t Quota<thread_safe>::GetUsage() const {
  lock_guard_maybe<Mutex> lock(mutex());
  return usage_;
}

template<bool thread_safe>
void StaticQuota<thread_safe>::SetQuota(const size_t quota) {
  lock_guard_maybe<Mutex> lock(Quota<thread_safe>::mutex());
  quota_ = quota;
}

template <class T>
using EndOfObjectResultType =
    typename boost::mpl::if_<boost::is_const<T>, const char*, char*>::type;

// Returns a pointer just past the end of the object 't' (const char* if T is
// const, char* otherwise).
template <class T>
EndOfObjectResultType<T>
EndOfObject(T* t) {
  typedef EndOfObjectResultType<T> ResultType;
  return reinterpret_cast<ResultType>(t) + sizeof(T);
}

// There is a standard shared_from_this() function injected into a class by
// extending the std::enable_shared_from_this template. We use this for the
// ReactorTask and MonitoredTask base classes. But for their subclasses we
// sometimes need to get a shared pointer to the specific class type; for
// example, for DelayedTask we need to get shared_ptr<DelayedTask>.
// shared_from_this defined in the base ReactorTask class will return
// shared_ptr<ReactorTask>. That is why we define the free function template
// shared_from, which downcasts to a shared_ptr of the type deduced from
// whatever we pass as an argument (shared_ptr<DelayedTask> in this case).
template <typename U>
std::shared_ptr<U> shared_from(U* u) {
  return std::static_pointer_cast<U>(u->shared_from_this());
}
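
// Example (illustrative; DelayedTask derives, via its base class, from
// std::enable_shared_from_this):
//
//   void DelayedTask::Run() {
//     std::shared_ptr<DelayedTask> self = shared_from(this);
//     // 'self' keeps this task alive until it goes out of scope.
//   }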

// Creates a non-owning shared_ptr to 'u' (via the aliasing constructor with
// an empty control block); the returned pointer never deletes 'u'.
template <class U>
std::shared_ptr<U> FakeSharedPtr(U* u) {
  return std::shared_ptr<U>(std::shared_ptr<U>(), u);
}

// Returns empty string if TCMalloc is not enabled.
std::string TcMallocStats();

}  // namespace yb

#endif // YB_UTIL_MEMORY_MEMORY_H_