/Users/deen/code/yugabyte-db/src/yb/util/file_system_mem.cc
Line | Count | Source (jump to first uncovered line) |
1 | | // Copyright (c) YugaByte, Inc. |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except |
4 | | // in compliance with the License. You may obtain a copy of the License at |
5 | | // |
6 | | // http://www.apache.org/licenses/LICENSE-2.0 |
7 | | // |
8 | | // Unless required by applicable law or agreed to in writing, software distributed under the License |
9 | | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express |
10 | | // or implied. See the License for the specific language governing permissions and limitations |
11 | | // under the License. |
12 | | // |
13 | | |
14 | | #include "yb/util/file_system_mem.h" |
15 | | |
16 | | #include "yb/util/malloc.h" |
17 | | #include "yb/util/result.h" |
18 | | #include "yb/util/status_format.h" |
19 | | |
20 | | namespace yb { |
21 | | |
22 | 59 | Status InMemoryFileState::Read(uint64_t offset, size_t n, Slice* result, uint8_t* scratch) const { |
23 | 59 | if (offset > size_) { |
24 | 2 | return STATUS_FORMAT(IOError, "Offset ($0) greater than file size ($1).", offset, size_); |
25 | 2 | } |
26 | 57 | const uint64_t available = size_ - offset; |
27 | 57 | if (n > available) { |
28 | 19 | n = available; |
29 | 19 | } |
30 | 57 | if (n == 0) { |
31 | 8 | *result = Slice(); |
32 | 8 | return Status::OK(); |
33 | 8 | } |
34 | | |
35 | 49 | size_t block = offset / kBlockSize; |
36 | 49 | size_t block_offset = offset % kBlockSize; |
37 | | |
38 | 49 | if (n <= kBlockSize - block_offset) { |
39 | | // The requested bytes are all in the first block. |
40 | 46 | *result = Slice(blocks_[block] + block_offset, n); |
41 | 46 | return Status::OK(); |
42 | 46 | } |
43 | | |
44 | 3 | size_t bytes_to_copy = n; |
45 | 3 | uint8_t* dst = scratch; |
46 | | |
47 | 81 | while (bytes_to_copy > 0) { |
48 | 78 | size_t avail = kBlockSize - block_offset; |
49 | 78 | if (avail > bytes_to_copy) { |
50 | 3 | avail = bytes_to_copy; |
51 | 3 | } |
52 | 78 | memcpy(dst, blocks_[block] + block_offset, avail); |
53 | | |
54 | 78 | bytes_to_copy -= avail; |
55 | 78 | dst += avail; |
56 | 78 | block++; |
57 | 78 | block_offset = 0; |
58 | 78 | } |
59 | | |
60 | 3 | *result = Slice(scratch, n); |
61 | 3 | return Status::OK(); |
62 | 3 | } |
63 | | |
64 | 0 | Status InMemoryFileState::PreAllocate(uint64_t size) { |
65 | 0 | std::vector<uint8_t> padding(static_cast<size_t>(size), static_cast<uint8_t>(0)); |
66 | | // TODO optimize me |
67 | 0 | memset(padding.data(), 0, sizeof(uint8_t)); |
68 | | // Clang analyzer thinks the function below can thrown an exception and cause the "padding" |
69 | | // memory to leak. |
70 | 0 | Status s = AppendRaw(padding.data(), size); |
71 | 0 | size_ -= size; |
72 | 0 | return s; |
73 | 0 | } |
74 | | |
75 | 82 | Status InMemoryFileState::Append(const Slice& data) { |
76 | 82 | return AppendRaw(data.data(), data.size()); |
77 | 82 | } |
78 | | |
79 | 82 | Status InMemoryFileState::AppendRaw(const uint8_t *src, size_t src_len) { |
80 | 255 | while (src_len > 0) { |
81 | 173 | size_t avail; |
82 | 173 | size_t offset = size_ % kBlockSize; |
83 | | |
84 | 173 | if (offset != 0) { |
85 | | // There is some room in the last block. |
86 | 43 | avail = kBlockSize - offset; |
87 | 130 | } else { |
88 | | // No room in the last block; push new one. |
89 | 130 | blocks_.push_back(new uint8_t[kBlockSize]); |
90 | 130 | avail = kBlockSize; |
91 | 130 | } |
92 | | |
93 | 173 | if (avail > src_len) { |
94 | 81 | avail = src_len; |
95 | 81 | } |
96 | 173 | memcpy(blocks_.back() + offset, src, avail); |
97 | 173 | src_len -= avail; |
98 | 173 | src += avail; |
99 | 173 | size_ += avail; |
100 | 173 | } |
101 | | |
102 | 82 | return Status::OK(); |
103 | 82 | } |
104 | | |
105 | 0 | size_t InMemoryFileState::memory_footprint() const { |
106 | 0 | size_t size = malloc_usable_size(this); |
107 | 0 | if (blocks_.capacity() > 0) { |
108 | 0 | size += malloc_usable_size(blocks_.data()); |
109 | 0 | } |
110 | 0 | for (uint8_t* block : blocks_) { |
111 | 0 | size += malloc_usable_size(block); |
112 | 0 | } |
113 | 0 | size += filename_.capacity(); |
114 | 0 | return size; |
115 | 0 | } |
116 | | |
117 | | |
118 | 23 | Status InMemorySequentialFile::Read(size_t n, Slice* result, uint8_t* scratch) { |
119 | 23 | Status s = file_->Read(pos_, n, result, scratch); |
120 | 23 | if (s.ok()) { |
121 | 23 | pos_ += result->size(); |
122 | 23 | } |
123 | 23 | return s; |
124 | 23 | } |
125 | | |
126 | 4 | Status InMemorySequentialFile::Skip(uint64_t n) { |
127 | 4 | if (pos_ > file_->Size()) { |
128 | 0 | return STATUS(IOError, "pos_ > file_->Size()"); |
129 | 0 | } |
130 | 4 | const size_t available = file_->Size() - pos_; |
131 | 4 | if (n > available) { |
132 | 2 | n = available; |
133 | 2 | } |
134 | 4 | pos_ += n; |
135 | 4 | return Status::OK(); |
136 | 4 | } |
137 | | |
// Positional read: delegates directly to the shared InMemoryFileState.
Status InMemoryRandomAccessFile::Read(
    uint64_t offset, size_t n, Slice* result, uint8_t* scratch) const {
  return file_->Read(offset, n, result, scratch);
}
142 | | |
// Returns the current logical size of the underlying in-memory file.
Result<uint64_t> InMemoryRandomAccessFile::Size() const {
  return file_->Size();
}
146 | | |
// In-memory files have no filesystem inode; 0 is returned as a placeholder.
Result<uint64_t> InMemoryRandomAccessFile::INode() const {
  return 0;
}
150 | | |
151 | | } // namespace yb |