YugabyteDB (2.13.0.0-b42, bfc6a6643e7399ac8a0e81d06a3ee6d6571b33ab)

Coverage Report

Created: 2022-03-09 17:30

/Users/deen/code/yugabyte-db/src/yb/rocksdb/util/arena.h
Source (hit counts from the coverage run are shown as trailing comments)
//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under the BSD-style license found in the
//  LICENSE file in the root directory of this source tree. An additional grant
//  of patent rights can be found in the PATENTS file in the same directory.
//
// The following only applies to changes made to this file as part of YugaByte development.
//
// Portions Copyright (c) YugaByte, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License.  You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied.  See the License for the specific language governing permissions and limitations
// under the License.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

// Arena is an implementation of the Allocator class. For a small request, it
// allocates out of a block with a pre-defined block size. For a large request,
// it uses malloc to directly get the requested size.
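
To make that policy concrete, here is a minimal usage sketch. The driver function and the request sizes are hypothetical; only the Arena API itself comes from this header.

#include "yb/rocksdb/util/arena.h"

void ArenaUsageSketch() {  // hypothetical driver, not part of this header
  rocksdb::Arena arena;                   // blocks of kMinBlockSize, no huge pages
  char* small = arena.Allocate(64);       // carved out of the current block
  char* large = arena.Allocate(1 << 20);  // larger than a block: served as one
                                          // "irregular" block of exactly that size
  (void)small;
  (void)large;
}  // ~Arena() releases every block at once; there is no per-allocation free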

#ifndef YB_ROCKSDB_UTIL_ARENA_H
#define YB_ROCKSDB_UTIL_ARENA_H

#pragma once
#ifndef OS_WIN
#include <sys/mman.h>
#endif
#include <assert.h>
#include <stdint.h>

#include <cstddef>
#include <cerrno>
#include <vector>

#include "yb/rocksdb/util/allocator.h"
#include "yb/rocksdb/util/mutexlock.h"

namespace yb {

class MemTracker;

}

namespace rocksdb {

class Arena : public Allocator {
 public:
  // No copying allowed
  Arena(const Arena&) = delete;
  void operator=(const Arena&) = delete;

  static const size_t kInlineSize = 2048;
  static const size_t kMinBlockSize;
  static const size_t kMaxBlockSize;

  // huge_page_size: if 0, don't use huge page TLB. If > 0 (should be set to
  // the supported hugepage size of the system), block allocation will try huge
  // page TLB first. If that allocation fails, it will fall back to the normal case.
  explicit Arena(size_t block_size = kMinBlockSize, size_t huge_page_size = 0);
  ~Arena();

  char* Allocate(size_t bytes) override;

  void SetMemTracker(std::shared_ptr<yb::MemTracker> mem_tracker);

  // huge_page_size: if > 0, will try to allocate from huge page TLB.
  // The argument is the page size of the huge page TLB. Bytes will be rounded
  // up to a multiple of the page size to allocate through mmap with the
  // anonymous and huge page options on. The extra space allocated will be
  // wasted. If the allocation fails, it will fall back to the normal case.
  // To enable it, huge pages need to be reserved beforehand, like:
  //     sysctl -w vm.nr_hugepages=20
  // See the Linux doc Documentation/vm/hugetlbpage.txt for details.
  // Since huge page allocation can fail (falling back to the normal case),
  // the failure messages are logged to the logger. So when calling with
  // huge_page_tlb_size > 0, we highly recommend passing in a logger.
  // Otherwise, the error message will be printed out to stderr directly.
  char* AllocateAligned(size_t bytes, size_t huge_page_size = 0,
                        Logger* logger = nullptr) override;
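
The huge-page path itself lives in arena.cc and is not part of this report. The sketch below only illustrates the mmap technique the comment describes, assuming a Linux build where MAP_HUGETLB is available; the helper name is hypothetical.

#include <sys/mman.h>
#include <cstddef>

// Hypothetical helper: round the request up to a multiple of the huge page
// size (the slack past `bytes` is wasted) and ask mmap for huge-page-backed
// anonymous memory. Requires huge_page_size > 0.
char* TryHugePageAllocation(size_t bytes, size_t huge_page_size) {
#ifdef MAP_HUGETLB
  const size_t reserved =
      ((bytes + huge_page_size - 1) / huge_page_size) * huge_page_size;
  void* addr = mmap(nullptr, reserved, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
  if (addr != MAP_FAILED) {
    return reinterpret_cast<char*>(addr);
  }
#endif
  return nullptr;  // caller falls back to the normal allocation path
}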

  // Returns an estimate of the total memory usage of data allocated
  // by the arena (excluding the space allocated but not yet used for future
  // allocations).
  size_t ApproximateMemoryUsage() const {                        // 508k hits
    return blocks_memory_ + blocks_.capacity() * sizeof(char*) -
           alloc_bytes_remaining_;
  }
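
A worked instance of that estimate, with made-up numbers and 64-bit pointers assumed: one 4096-byte block on the books, a blocks_ vector with capacity for two pointers, and 1024 bytes of the current block still unused gives 4096 + 2 * 8 - 1024 = 3088 bytes.

static_assert(4096 + 2 * sizeof(char*) - 1024 == 3088,  // hypothetical figures
              "blocks_memory_ + capacity * sizeof(char*) - remaining");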

  size_t MemoryAllocatedBytes() const { return blocks_memory_; }        // 152M hits

  size_t AllocatedAndUnused() const { return alloc_bytes_remaining_; }  // 152M hits

  // If an allocation is too big, we'll allocate an irregular block with the
  // same size as that allocation.
  size_t IrregularBlockNum() const { return irregular_block_num; }      // 152M hits

  size_t BlockSize() const override { return kBlockSize; }              // 1.30k hits

 private:
  void Consumed(size_t size);

  char inline_block_[kInlineSize] __attribute__((__aligned__(sizeof(void*))));
  // Number of bytes allocated in one block
  const size_t kBlockSize;
  // Array of new[] allocated memory blocks
  typedef std::vector<char*> Blocks;
  Blocks blocks_;

  struct MmapInfo {
    void* addr_;
    size_t length_;

    MmapInfo(void* addr, size_t length) : addr_(addr), length_(length) {}  // 0 hits (uncovered)
  };
  std::vector<MmapInfo> huge_blocks_;
  size_t irregular_block_num = 0;

  // Stats for the current active block.
  // For each block, we allocate aligned memory chunks from one end and
  // unaligned memory chunks from the other end. Otherwise the memory wasted
  // on alignment would be higher if we allocated both types of memory from
  // one direction.
  char* unaligned_alloc_ptr_ = nullptr;
  char* aligned_alloc_ptr_ = nullptr;
  // How many bytes are left in the currently active block?
  size_t alloc_bytes_remaining_ = 0;
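
Pictorially (an illustration, not code from this file), the two cursors move toward each other inside the active block, and the block is exhausted when they meet:

//  block start                                                    block end
//  | aligned chunks ...-> |        free bytes        | <-... unaligned chunks |
//                         ^                          ^
//                 aligned_alloc_ptr_         unaligned_alloc_ptr_
//      (alloc_bytes_remaining_ = unaligned_alloc_ptr_ - aligned_alloc_ptr_)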

#ifdef MAP_HUGETLB
  size_t hugetlb_size_ = 0;
#endif  // MAP_HUGETLB
  char* AllocateFromHugePage(size_t bytes);
  char* AllocateFallback(size_t bytes, bool aligned);
  char* AllocateNewBlock(size_t block_bytes);

  // Bytes of memory in blocks allocated so far
  size_t blocks_memory_ = 0;
  std::shared_ptr<yb::MemTracker> mem_tracker_;
};

inline char* Arena::Allocate(size_t bytes) {                 // 8.02M calls
  // The semantics of what to return are a bit messy if we allow
  // 0-byte allocations, so we disallow them here (we don't need
  // them for our internal use).
  assert(bytes > 0);
  if (bytes <= alloc_bytes_remaining_) {
    unaligned_alloc_ptr_ -= bytes;
    alloc_bytes_remaining_ -= bytes;
    return unaligned_alloc_ptr_;
  }
  return AllocateFallback(bytes, false /* unaligned */);     // 2.36k calls (slow path)
}
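
AllocateFallback is defined in arena.cc and does not appear in this report. As an assumption-laden sketch, the following mirrors the upstream RocksDB policy, which may differ from YugabyteDB's actual code: requests larger than a quarter of the block size get their own exact-size irregular block, and anything smaller retires the current block and starts a fresh one.

// Illustrative sketch only; the real AllocateFallback lives in arena.cc.
char* Arena::AllocateFallback(size_t bytes, bool aligned) {
  if (bytes > kBlockSize / 4) {
    // Oversized: give the request its own exact-size block so the leftover
    // bytes of a regular block aren't wasted on it.
    ++irregular_block_num;
    return AllocateNewBlock(bytes);
  }
  // Otherwise retire the current block (its tail is wasted) and carve the
  // request out of a fresh block of kBlockSize bytes.
  char* block_head = AllocateNewBlock(kBlockSize);
  alloc_bytes_remaining_ = kBlockSize - bytes;
  if (aligned) {
    aligned_alloc_ptr_ = block_head + bytes;
    unaligned_alloc_ptr_ = block_head + kBlockSize;
    return block_head;
  }
  aligned_alloc_ptr_ = block_head;
  unaligned_alloc_ptr_ = block_head + kBlockSize - bytes;
  return unaligned_alloc_ptr_;
}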

// Check and adjust block_size so that the return value is
//  1. in the range of [kMinBlockSize, kMaxBlockSize], and
//  2. a multiple of the align unit.
extern size_t OptimizeBlockSize(size_t block_size);
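
That contract is simple enough to sketch. Assuming the align unit is alignof(std::max_align_t) (an assumption; the real definition, in arena.cc, may differ), a hypothetical equivalent is:

// Hypothetical sketch (needs <algorithm> and <cstddef>).
inline size_t OptimizeBlockSizeSketch(size_t block_size) {
  block_size = std::max(Arena::kMinBlockSize, block_size);    // clamp from below
  block_size = std::min(Arena::kMaxBlockSize, block_size);    // clamp from above
  const size_t kAlignUnit = alignof(std::max_align_t);        // assumed align unit
  if (block_size % kAlignUnit != 0) {
    block_size = (1 + block_size / kAlignUnit) * kAlignUnit;  // round up
  }
  return block_size;
}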

}  // namespace rocksdb

#endif // YB_ROCKSDB_UTIL_ARENA_H