/Users/deen/code/yugabyte-db/src/yb/rocksdb/util/arena.cc
Line | Count | Source
1 | | // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 | | // This source code is licensed under the BSD-style license found in the
3 | | // LICENSE file in the root directory of this source tree. An additional grant
4 | | // of patent rights can be found in the PATENTS file in the same directory.
5 | | //
6 | | // The following only applies to changes made to this file as part of YugaByte development.
7 | | //
8 | | // Portions Copyright (c) YugaByte, Inc.
9 | | //
10 | | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
11 | | // in compliance with the License. You may obtain a copy of the License at
12 | | //
13 | | // http://www.apache.org/licenses/LICENSE-2.0
14 | | //
15 | | // Unless required by applicable law or agreed to in writing, software distributed under the License
16 | | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
17 | | // or implied. See the License for the specific language governing permissions and limitations
18 | | // under the License.
19 | | //
20 | | // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
21 | | // Use of this source code is governed by a BSD-style license that can be
22 | | // found in the LICENSE file. See the AUTHORS file for names of contributors.
23 | |
24 | | #include "yb/rocksdb/util/arena.h"
25 | | |
26 | | #include <algorithm> |
27 | | |
28 | | #ifdef ROCKSDB_MALLOC_USABLE_SIZE |
29 | | #include <malloc.h> |
30 | | #endif |
31 | | #ifndef OS_WIN |
32 | | #include <sys/mman.h> |
33 | | #endif |
34 | | |
35 | | #include "yb/rocksdb/port/port.h" |
36 | | #include "yb/rocksdb/env.h" |
37 | | |
38 | | #include "yb/util/mem_tracker.h" |
39 | | |
40 | | namespace rocksdb { |
41 | | |
42 | | // MSVC complains that it is already defined since it is static in the header. |
43 | | #ifndef OS_WIN |
44 | | const size_t Arena::kInlineSize; |
45 | | #endif |
46 | | |
47 | | const size_t Arena::kMinBlockSize = 4096; |
48 | | const size_t Arena::kMaxBlockSize = 2u << 30;
49 | | static const int kAlignUnit = sizeof(void*); |
50 | | |
51 | 41.7M | size_t OptimizeBlockSize(size_t block_size) { |
52 | | // Make sure block_size is in optimal range |
53 | 41.7M | block_size = std::max(Arena::kMinBlockSize, block_size); |
54 | 41.7M | block_size = std::min(Arena::kMaxBlockSize, block_size); |
55 | | |
56 | | // Make sure block_size is a multiple of kAlignUnit
57 | 41.7M | if (block_size % kAlignUnit != 0) { |
58 | 0 | block_size = (1 + block_size / kAlignUnit) * kAlignUnit; |
59 | 0 | } |
60 | | |
61 | 41.7M | return block_size; |
62 | 41.7M | } |
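
A minimal standalone sketch of the clamp-and-round logic above (hypothetical names; the constants mirror kMinBlockSize, kMaxBlockSize, and kAlignUnit on a 64-bit build):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    constexpr size_t kMin = 4096;
    constexpr size_t kMax = size_t{2} << 30;
    constexpr size_t kAlign = 8;  // sizeof(void*) on a 64-bit build

    size_t OptimizeBlockSizeSketch(size_t block_size) {
      // Clamp into [kMin, kMax], then round up to a multiple of kAlign.
      block_size = std::max(kMin, std::min(kMax, block_size));
      if (block_size % kAlign != 0) {
        block_size = (1 + block_size / kAlign) * kAlign;
      }
      return block_size;
    }

    int main() {
      assert(OptimizeBlockSizeSketch(100) == 4096);   // clamped up to the minimum
      assert(OptimizeBlockSizeSketch(8190) == 8192);  // rounded up to a multiple of 8
    }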
63 | | |
64 | | Arena::Arena(size_t block_size, size_t huge_page_size) |
65 | 41.2M | : kBlockSize(OptimizeBlockSize(block_size)) { |
66 | 41.2M | assert(kBlockSize >= kMinBlockSize && kBlockSize <= kMaxBlockSize && |
67 | 41.2M | kBlockSize % kAlignUnit == 0); |
68 | 0 | alloc_bytes_remaining_ = sizeof(inline_block_); |
69 | 41.2M | blocks_memory_ += alloc_bytes_remaining_; |
70 | 41.2M | aligned_alloc_ptr_ = inline_block_; |
71 | 41.2M | unaligned_alloc_ptr_ = inline_block_ + alloc_bytes_remaining_; |
72 | | #ifdef MAP_HUGETLB |
73 | | hugetlb_size_ = huge_page_size; |
74 | | if (hugetlb_size_ && kBlockSize > hugetlb_size_) { |
75 | | hugetlb_size_ = ((kBlockSize - 1U) / hugetlb_size_ + 1U) * hugetlb_size_; |
76 | | } |
77 | | #endif |
78 | 41.2M | } |
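
The constructor points both allocation cursors at an inline buffer embedded in the Arena object itself, so small arenas never touch the heap: aligned requests advance from the front of the block, unaligned requests retreat from the back (see AllocateFallback below). A minimal sketch of that two-ended layout, with hypothetical names and an assumed inline size standing in for Arena::kInlineSize:

    #include <cstddef>

    struct TwoEndedArenaSketch {
      static constexpr size_t kInline = 2048;  // assumed stand-in for Arena::kInlineSize
      char inline_block[kInline];
      char* aligned_ptr = inline_block;              // grows forward
      char* unaligned_ptr = inline_block + kInline;  // grows backward
      size_t remaining = kInline;

      char* AllocAligned(size_t n) {    // carve from the front
        char* r = aligned_ptr;
        aligned_ptr += n;
        remaining -= n;
        return r;
      }
      char* AllocUnaligned(size_t n) {  // carve from the back
        unaligned_ptr -= n;
        remaining -= n;
        return unaligned_ptr;
      }
    };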
79 | | |
80 | 41.1M | Arena::~Arena() { |
81 | 41.1M | for (const auto& block : blocks_) { |
82 | 965k | delete[] block; |
83 | 965k | } |
84 | | |
85 | | #ifdef MAP_HUGETLB |
86 | | for (const auto& mmap_info : huge_blocks_) { |
87 | | auto ret = munmap(mmap_info.addr_, mmap_info.length_); |
88 | | if (ret != 0) { |
89 | | // TODO(sdong): Better handling |
90 | | } |
91 | | } |
92 | | #endif |
93 | | |
94 | 41.1M | if (mem_tracker_) { |
95 | 386k | mem_tracker_->Release(blocks_memory_); |
96 | 386k | } |
97 | 41.1M | } |
98 | | |
99 | 1.17M | char* Arena::AllocateFallback(size_t bytes, bool aligned) { |
100 | 1.17M | if (bytes > kBlockSize / 4) { |
101 | 137k | ++irregular_block_num; |
102 | | // Object is more than a quarter of our block size. Allocate it separately |
103 | | // to avoid wasting too much space in leftover bytes. |
104 | 137k | return AllocateNewBlock(bytes); |
105 | 137k | } |
106 | | |
107 | | // We waste the remaining space in the current block. |
108 | 1.03M | size_t size = 0; |
109 | 1.03M | char* block_head = nullptr; |
110 | | #ifdef MAP_HUGETLB |
111 | | if (hugetlb_size_) { |
112 | | size = hugetlb_size_; |
113 | | block_head = AllocateFromHugePage(size); |
114 | | } |
115 | | #endif |
116 | 1.03M | if (!block_head) { |
117 | 1.03M | size = kBlockSize; |
118 | 1.03M | block_head = AllocateNewBlock(size); |
119 | 1.03M | } |
120 | 1.03M | alloc_bytes_remaining_ = size - bytes; |
121 | | |
122 | 1.03M | if (aligned) { |
123 | 1.03M | aligned_alloc_ptr_ = block_head + bytes; |
124 | 1.03M | unaligned_alloc_ptr_ = block_head + size; |
125 | 1.03M | return block_head; |
126 | 1.03M | } else { |
127 | 3.52k | aligned_alloc_ptr_ = block_head; |
128 | 3.52k | unaligned_alloc_ptr_ = block_head + size - bytes; |
129 | 3.52k | return unaligned_alloc_ptr_; |
130 | 3.52k | } |
131 | 1.03M | } |
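
AllocateFallback bounds internal waste: a request larger than kBlockSize / 4 gets a dedicated block, so abandoning the tail of the current shared block can never waste more than a quarter of it. A worked check of that threshold, assuming kBlockSize = 4096:

    #include <cassert>
    #include <cstddef>

    // Mirrors the `bytes > kBlockSize / 4` test above: true means the
    // request should bypass the shared block entirely.
    bool UseDedicatedBlock(size_t bytes, size_t block_size) {
      return bytes > block_size / 4;
    }

    int main() {
      assert(UseDedicatedBlock(1500, 4096));   // > 1024: dedicated allocation
      assert(!UseDedicatedBlock(900, 4096));   // <= 1024: start a new shared block;
                                               // at most 1024 tail bytes are wasted
    }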
132 | | |
133 | 0 | char* Arena::AllocateFromHugePage(size_t bytes) { |
134 | | #ifdef MAP_HUGETLB |
135 | | if (hugetlb_size_ == 0) { |
136 | | return nullptr; |
137 | | } |
138 | | // Reserve space in huge_blocks_ before calling mmap(), so the
139 | | // insertion into the vector below cannot throw and leak the mapping.
140 | | // If reserve() itself throws, nothing has been mapped yet, so
141 | | // nothing leaks either.
142 | | huge_blocks_.reserve(huge_blocks_.size() + 1); |
143 | | |
144 | | void* addr = mmap(nullptr, bytes, (PROT_READ | PROT_WRITE), |
145 | | (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB), 0, 0); |
146 | | |
147 | | if (addr == MAP_FAILED) { |
148 | | return nullptr; |
149 | | } |
150 | | // the following shouldn't throw because of the above reserve() |
151 | | huge_blocks_.emplace_back(addr, bytes);
152 | | return reinterpret_cast<char*>(addr); |
153 | | #else |
154 | 0 | return nullptr; |
155 | 0 | #endif |
156 | 0 | } |
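
Both AllocateFromHugePage and AllocateNewBlock below use the same exception-safety pattern: grow the tracking vector's capacity before acquiring the resource, so that recording the resource afterwards cannot throw and leak it. A generic sketch of that pattern, with hypothetical names:

    #include <vector>

    template <typename Resource, typename Acquire>
    Resource* TrackedAcquire(std::vector<Resource*>* owned, Acquire acquire) {
      owned->reserve(owned->size() + 1);  // may throw, but nothing acquired yet
      Resource* r = acquire();            // may fail; nothing recorded yet
      owned->push_back(r);                // cannot throw: capacity was reserved
      return r;
    }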
157 | | |
158 | | char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size, |
159 | 589M | Logger* logger) { |
160 | 589M | assert((kAlignUnit & (kAlignUnit - 1)) == |
161 | 589M | 0); // Pointer size should be a power of 2 |
162 | | |
163 | | #ifdef MAP_HUGETLB |
164 | | if (huge_page_size > 0 && bytes > 0) { |
165 | | // Allocate from huge pages to reduce TLB misses.
166 | | assert(logger != nullptr); // logger needs to be passed in.
167 | | size_t reserved_size = |
168 | | ((bytes - 1U) / huge_page_size + 1U) * huge_page_size; |
169 | | assert(reserved_size >= bytes); |
170 | | |
171 | | char* addr = AllocateFromHugePage(reserved_size); |
172 | | if (addr == nullptr) { |
173 | | RWARN(logger, "AllocateAligned failed to allocate huge TLB pages: %s",
174 | | strerror(errno));
175 | | // fall back to malloc
176 | | } else { |
177 | | return addr; |
178 | | } |
179 | | } |
180 | | #endif |
181 | | |
182 | 0 | size_t current_mod = |
183 | 589M | reinterpret_cast<uintptr_t>(aligned_alloc_ptr_) & (kAlignUnit - 1); |
184 | 589M | size_t slop = (current_mod == 0 ? 0 : kAlignUnit - current_mod);
185 | 589M | size_t needed = bytes + slop; |
186 | 589M | char* result; |
187 | 589M | if (needed <= alloc_bytes_remaining_) { |
188 | 588M | result = aligned_alloc_ptr_ + slop; |
189 | 588M | aligned_alloc_ptr_ += needed; |
190 | 588M | alloc_bytes_remaining_ -= needed; |
191 | 588M | } else { |
192 | | // AllocateFallback always returns aligned memory |
193 | 1.07M | result = AllocateFallback(bytes, true /* aligned */); |
194 | 1.07M | } |
195 | 589M | assert((reinterpret_cast<uintptr_t>(result) & (kAlignUnit - 1)) == 0); |
196 | 0 | return result; |
197 | 589M | } |
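
The alignment math above relies on kAlignUnit being a power of two: the low bits of the pointer give its misalignment, and the slop is the padding needed to reach the next boundary. A worked sketch for kAlignUnit = 8 (hypothetical helper name):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    size_t Slop(uintptr_t ptr, size_t align) {
      size_t mod = ptr & (align - 1);  // valid only when align is a power of 2
      return mod == 0 ? 0 : align - mod;
    }

    int main() {
      assert(Slop(0x1000, 8) == 0);  // already 8-byte aligned
      assert(Slop(0x1003, 8) == 5);  // pad 5 bytes to reach 0x1008
    }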
198 | | |
199 | 1.17M | char* Arena::AllocateNewBlock(size_t block_bytes) { |
200 | | // Reserve space in blocks_ before allocating memory via new, so the
201 | | // insertion into the vector below cannot throw and leak the allocation.
202 | | // If reserve() itself throws, nothing has been allocated yet, so
203 | | // nothing leaks either.
204 | 1.17M | blocks_.reserve(blocks_.size() + 1); |
205 | | |
206 | 1.17M | char* block = new char[block_bytes]; |
207 | | |
208 | | #ifdef ROCKSDB_MALLOC_USABLE_SIZE |
209 | | Consumed(malloc_usable_size(block)); |
210 | | #else |
211 | 1.17M | Consumed(block_bytes); |
212 | 1.17M | #endif // ROCKSDB_MALLOC_USABLE_SIZE |
213 | | // the following shouldn't throw because of the above reserve() |
214 | 1.17M | blocks_.push_back(block); |
215 | 1.17M | return block; |
216 | 1.17M | } |
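
When ROCKSDB_MALLOC_USABLE_SIZE is defined, the arena charges the allocator's actual usable size rather than the requested size, capturing allocator rounding. malloc_usable_size is a glibc extension declared in <malloc.h>; a small demo (printed values are illustrative and allocator-dependent):

    #include <malloc.h>  // glibc extension; not portable
    #include <cstdio>
    #include <cstdlib>

    int main() {
      void* p = malloc(100);
      // Typically prints a value >= 100 (e.g. 104 or 112) due to rounding.
      printf("usable = %zu\n", malloc_usable_size(p));
      free(p);
    }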
217 | | |
218 | 1.17M | void Arena::Consumed(size_t size) { |
219 | 1.17M | blocks_memory_ += size; |
220 | 1.17M | if (mem_tracker_) { |
221 | 601k | mem_tracker_->Consume(size); |
222 | 601k | } |
223 | 1.17M | } |
224 | | |
225 | 426k | void Arena::SetMemTracker(yb::MemTrackerPtr mem_tracker) { |
226 | 426k | mem_tracker_ = std::move(mem_tracker); |
227 | 426k | mem_tracker_->Consume(blocks_memory_); |
228 | 426k | } |
229 | | |
230 | | } // namespace rocksdb |