/Users/deen/code/yugabyte-db/src/yb/gutil/endian.h
Line | Count | Source (jump to first uncovered line) |
1 | | // Copyright 2005 Google Inc. |
2 | | // |
3 | | // Licensed to the Apache Software Foundation (ASF) under one |
4 | | // or more contributor license agreements. See the NOTICE file |
5 | | // distributed with this work for additional information |
6 | | // regarding copyright ownership. The ASF licenses this file |
7 | | // to you under the Apache License, Version 2.0 (the |
8 | | // "License"); you may not use this file except in compliance |
9 | | // with the License. You may obtain a copy of the License at |
10 | | // |
11 | | // http://www.apache.org/licenses/LICENSE-2.0 |
12 | | // |
13 | | // Unless required by applicable law or agreed to in writing, |
14 | | // software distributed under the License is distributed on an |
15 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
16 | | // KIND, either express or implied. See the License for the |
17 | | // specific language governing permissions and limitations |
18 | | // under the License. |
19 | | // |
20 | | // The following only applies to changes made to this file as part of YugaByte development. |
21 | | // |
22 | | // Portions Copyright (c) YugaByte, Inc. |
23 | | // |
24 | | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except |
25 | | // in compliance with the License. You may obtain a copy of the License at |
26 | | // |
27 | | // http://www.apache.org/licenses/LICENSE-2.0 |
28 | | // |
29 | | // Unless required by applicable law or agreed to in writing, software distributed under the License |
30 | | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express |
31 | | // or implied. See the License for the specific language governing permissions and limitations |
32 | | // under the License. |
33 | | // |
34 | | // --- |
35 | | // |
36 | | // |
37 | | // Utility functions that depend on bytesex. We define htonll and ntohll, |
38 | | // as well as "Google" versions of all the standards: ghtonl, ghtons, and |
39 | | // so on. These functions do exactly the same as their standard variants, |
40 | | // but don't require including the dangerous netinet/in.h. |
41 | | // |
42 | | // Buffer routines will copy to and from buffers without causing |
43 | | // a bus error when the architecture requires different byte alignments |
44 | | #ifndef YB_GUTIL_ENDIAN_H |
45 | | #define YB_GUTIL_ENDIAN_H |
46 | | |
47 | | #include <assert.h> |
48 | | |
49 | | #include "yb/gutil/int128.h" |
50 | | #include "yb/gutil/integral_types.h" |
51 | | #include "yb/gutil/port.h" |
52 | | |
// Byte-swaps a 64-bit value (reverses the order of its 8 bytes).
// This is the 64-bit building block for the hton/ntoh-style helpers below,
// provided here so callers don't need netinet/in.h.
inline uint64 gbswap_64(uint64 host_int) {
#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__)
  // Adapted from /usr/include/byteswap.h. Not available on Mac.
  if (__builtin_constant_p(host_int)) {
    // Compile-time constant: let the preprocessor macro fold it instead of
    // emitting the inline asm below.
    return __bswap_constant_64(host_int);
  } else {
    uint64 result;
    // Single BSWAP instruction; the "0" constraint ties the input to the
    // same register as the output.
    __asm__("bswap %0" : "=r" (result) : "0" (host_int));
    return result;
  }
#elif defined(bswap_64)
  return bswap_64(host_int);
#else
  // Portable fallback: swap each 32-bit half, then exchange the halves.
  return static_cast<uint64>(bswap_32(static_cast<uint32>(host_int >> 32))) |
      (static_cast<uint64>(bswap_32(static_cast<uint32>(host_int))) << 32);
#endif  // bswap_64
}
70 | | |
#ifdef IS_LITTLE_ENDIAN

// Definitions for ntohl etc. that don't require us to include
// netinet/in.h. We wrap bswap_32 and bswap_16 in functions rather
// than just #defining them because in debug mode, gcc doesn't
// correctly handle the (rather involved) definitions of bswap_32.
// gcc guarantees that inline functions are as fast as macros, so
// this isn't a performance hit.
inline uint16 ghtons(uint16 x) { return bswap_16(x); }
inline uint32 ghtonl(uint32 x) { return bswap_32(x); }
inline uint64 ghtonll(uint64 x) { return gbswap_64(x); }

#elif defined IS_BIG_ENDIAN

// These definitions are simpler on big-endian machines.
// These are functions instead of macros to avoid self-assignment warnings
// on calls such as "i = ghtonl(i);". This also provides type checking.
inline uint16 ghtons(uint16 x) { return x; }
inline uint32 ghtonl(uint32 x) { return x; }
inline uint64 ghtonll(uint64 x) { return x; }

#else
#error "Unsupported bytesex: Either IS_BIG_ENDIAN or IS_LITTLE_ENDIAN must be defined"  // NOLINT
#endif  // bytesex
95 | | |
96 | | |
// ntoh* and hton* are the same thing for any size and bytesex,
// since the function is an involution, i.e., its own inverse.
#define gntohl(x) ghtonl(x)
#define gntohs(x) ghtons(x)
#define gntohll(x) ghtonll(x)
#if !defined(__APPLE__)
// This one is safe to take as it's an extension.
// NOTE(review): presumably Apple's headers already define htonll/ntohll,
// which is why they are skipped there -- confirm against the macOS SDK.
#define htonll(x) ghtonll(x)
#define ntohll(x) htonll(x)
#endif
107 | | |
108 | | // Utilities to convert numbers between the current hosts's native byte |
109 | | // order and little-endian byte order |
110 | | // |
111 | | // Load/Store methods are alignment safe |
112 | | class LittleEndian { |
113 | | public: |
114 | | // Conversion functions. |
115 | | #ifdef IS_LITTLE_ENDIAN |
116 | | |
117 | 0 | static uint16 FromHost16(uint16 x) { return x; } |
118 | 0 | static uint16 ToHost16(uint16 x) { return x; } |
119 | | |
120 | 3.55M | static uint32 FromHost32(uint32 x) { return x; } |
121 | 3.31M | static uint32 ToHost32(uint32 x) { return x; } |
122 | | |
123 | 21.0M | static uint64 FromHost64(uint64 x) { return x; } |
124 | 2.72M | static uint64 ToHost64(uint64 x) { return x; } |
125 | | |
126 | 0 | static bool IsLittleEndian() { return true; } |
127 | | |
128 | | #elif defined IS_BIG_ENDIAN |
129 | | |
130 | | static uint16 FromHost16(uint16 x) { return bswap_16(x); } |
131 | | static uint16 ToHost16(uint16 x) { return bswap_16(x); } |
132 | | |
133 | | static uint32 FromHost32(uint32 x) { return bswap_32(x); } |
134 | | static uint32 ToHost32(uint32 x) { return bswap_32(x); } |
135 | | |
136 | | static uint64 FromHost64(uint64 x) { return gbswap_64(x); } |
137 | | static uint64 ToHost64(uint64 x) { return gbswap_64(x); } |
138 | | |
139 | | static bool IsLittleEndian() { return false; } |
140 | | |
141 | | #endif /* ENDIAN */ |
142 | | |
143 | | // Functions to do unaligned loads and stores in little-endian order. |
144 | 0 | static uint16 Load16(const void *p) { |
145 | 0 | return ToHost16(UNALIGNED_LOAD16(p)); |
146 | 0 | } |
147 | | |
148 | 0 | static void Store16(void *p, uint16 v) { |
149 | 0 | UNALIGNED_STORE16(p, FromHost16(v)); |
150 | 0 | } |
151 | | |
152 | 3.31M | static uint32 Load32(const void *p) { |
153 | 3.31M | return ToHost32(UNALIGNED_LOAD32(p)); |
154 | 3.31M | } |
155 | | |
156 | 3.55M | static void Store32(void *p, uint32 v) { |
157 | 3.55M | UNALIGNED_STORE32(p, FromHost32(v)); |
158 | 3.55M | } |
159 | | |
160 | 79.0k | static uint64 Load64(const void *p) { |
161 | 79.0k | return ToHost64(UNALIGNED_LOAD64(p)); |
162 | 79.0k | } |
163 | | |
164 | | // Build a uint64 from 1-8 bytes. |
165 | | // 8 * len least significant bits are loaded from the memory with |
166 | | // LittleEndian order. The 64 - 8 * len most significant bits are |
167 | | // set all to 0. |
168 | | // In latex-friendly words, this function returns: |
169 | | // $\sum_{i=0}^{len-1} p[i] 256^{i}$, where p[i] is unsigned. |
170 | | // |
171 | | // This function is equivalent with: |
172 | | // uint64 val = 0; |
173 | | // memcpy(&val, p, len); |
174 | | // return ToHost64(val); |
175 | | // TODO(user): write a small benchmark and benchmark the speed |
176 | | // of a memcpy based approach. |
177 | | // |
178 | | // For speed reasons this function does not work for len == 0. |
179 | | // The caller needs to guarantee that 1 <= len <= 8. |
180 | 0 | static uint64 Load64VariableLength(const void * const p, int len) { |
181 | 0 | assert(len >= 1 && len <= 8); |
182 | 0 | const char * const buf = static_cast<const char *>(p); |
183 | 0 | uint64 val = 0; |
184 | 0 | --len; |
185 | 0 | do { |
186 | 0 | val = (val << 8) | buf[len]; |
187 | 0 | // (--len >= 0) is about 10 % faster than (len--) in some benchmarks. |
188 | 0 | } while (--len >= 0); |
189 | 0 | // No ToHost64(...) needed. The bytes are accessed in little-endian manner |
190 | 0 | // on every architecture. |
191 | 0 | return val; |
192 | 0 | } |
193 | | |
194 | 19.2k | static void Store64(void *p, uint64 v) { |
195 | 19.2k | UNALIGNED_STORE64(p, FromHost64(v)); |
196 | 19.2k | } |
197 | | |
198 | 0 | static uint128 Load128(const void *p) { |
199 | 0 | return uint128( |
200 | 0 | ToHost64(UNALIGNED_LOAD64(reinterpret_cast<const uint64 *>(p) + 1)), |
201 | 0 | ToHost64(UNALIGNED_LOAD64(p))); |
202 | 0 | } |
203 | | |
204 | 0 | static void Store128(void *p, const uint128 v) { |
205 | 0 | UNALIGNED_STORE64(p, FromHost64(Uint128Low64(v))); |
206 | 0 | UNALIGNED_STORE64(reinterpret_cast<uint64 *>(p) + 1, |
207 | 0 | FromHost64(Uint128High64(v))); |
208 | 0 | } |
209 | | |
210 | | // Build a uint128 from 1-16 bytes. |
211 | | // 8 * len least significant bits are loaded from the memory with |
212 | | // LittleEndian order. The 128 - 8 * len most significant bits are |
213 | | // set all to 0. |
214 | 0 | static uint128 Load128VariableLength(const void *p, int len) { |
215 | 0 | if (len <= 8) { |
216 | 0 | return uint128(Load64VariableLength(p, len)); |
217 | 0 | } else { |
218 | 0 | return uint128( |
219 | 0 | Load64VariableLength(static_cast<const char *>(p) + 8, len - 8), |
220 | 0 | Load64(p)); |
221 | 0 | } |
222 | 0 | } |
223 | | |
224 | | // Load & Store in machine's word size. |
225 | 0 | static uword_t LoadUnsignedWord(const void *p) { |
226 | 0 | if (sizeof(uword_t) == 8) |
227 | 0 | return Load64(p); |
228 | 0 | else |
229 | 0 | return Load32(p); |
230 | 0 | } |
231 | | |
232 | 0 | static void StoreUnsignedWord(void *p, uword_t v) { |
233 | 0 | if (sizeof(v) == 8) |
234 | 0 | Store64(p, v); |
235 | 0 | else |
236 | 0 | Store32(p, static_cast<uint32_t>(v)); |
237 | 0 | } |
238 | | }; |
239 | | |
// Utilities to convert numbers between the current host's native byte
// order and big-endian byte order (same as network byte order).
//
// Load/Store methods are alignment safe.
class BigEndian {
 public:
#ifdef IS_LITTLE_ENDIAN

  // On a little-endian host every conversion is a byte swap.
  static uint16 FromHost16(uint16 x) { return bswap_16(x); }
  static uint16 ToHost16(uint16 x) { return bswap_16(x); }

  static uint32 FromHost32(uint32 x) { return bswap_32(x); }
  static uint32 ToHost32(uint32 x) { return bswap_32(x); }

  static uint64 FromHost64(uint64 x) { return gbswap_64(x); }
  static uint64 ToHost64(uint64 x) { return gbswap_64(x); }

  static bool IsLittleEndian() { return true; }

#elif defined IS_BIG_ENDIAN

  // On a big-endian host these conversions are all identity functions.
  static uint16 FromHost16(uint16 x) { return x; }
  static uint16 ToHost16(uint16 x) { return x; }

  static uint32 FromHost32(uint32 x) { return x; }
  static uint32 ToHost32(uint32 x) { return x; }

  static uint64 FromHost64(uint64 x) { return x; }
  static uint64 ToHost64(uint64 x) { return x; }

  static bool IsLittleEndian() { return false; }

#endif /* ENDIAN */
  // Functions to do unaligned loads and stores in big-endian order.
  static uint16 Load16(const void *p) {
    return ToHost16(UNALIGNED_LOAD16(p));
  }

  static void Store16(void *p, uint16 v) {
    UNALIGNED_STORE16(p, FromHost16(v));
  }

  static uint32 Load32(const void *p) {
    return ToHost32(UNALIGNED_LOAD32(p));
  }

  static void Store32(void *p, uint32 v) {
    UNALIGNED_STORE32(p, FromHost32(v));
  }

  static uint64 Load64(const void *p) {
    return ToHost64(UNALIGNED_LOAD64(p));
  }

  // Build a uint64 from 1-8 bytes.
  // The 8 * len least significant bits of the result are taken from memory
  // interpreted in BigEndian order; the 64 - 8 * len most significant bits
  // are set to 0.
  //
  // NOTE(review): unlike the LittleEndian counterpart, this implementation
  // always reads a full 8 bytes starting at p and then masks, so the result
  // keeps the *last* len bytes of that 8-byte window (see the len <= 8
  // branch of Load128VariableLength below, which passes p + 8 accordingly).
  // The caller must therefore guarantee that 8 bytes are readable at p even
  // when len < 8 -- TODO confirm all call sites satisfy this.
  //
  // For speed reasons this function does not work for len == 0.
  // The caller needs to guarantee that 1 <= len <= 8.
  static uint64 Load64VariableLength(const void * const p, int len) {
    assert(len >= 1 && len <= 8);
    uint64 val = Load64(p);
    // Build a mask covering the 8 * len least significant bits.
    uint64 mask = 0;
    --len;
    do {
      mask = (mask << 8) | 0xff;
      // (--len >= 0) is about 10 % faster than (len--) in some benchmarks.
    } while (--len >= 0);
    return val & mask;
  }

  static void Store64(void *p, uint64 v) {
    UNALIGNED_STORE64(p, FromHost64(v));
  }

  // 128-bit layout in memory: high 64 bits at p, low 64 bits at p + 8.
  static uint128 Load128(const void *p) {
    return uint128(
        ToHost64(UNALIGNED_LOAD64(p)),
        ToHost64(UNALIGNED_LOAD64(reinterpret_cast<const uint64 *>(p) + 1)));
  }

  static void Store128(void *p, const uint128 v) {
    UNALIGNED_STORE64(p, FromHost64(Uint128High64(v)));
    UNALIGNED_STORE64(reinterpret_cast<uint64 *>(p) + 1,
                      FromHost64(Uint128Low64(v)));
  }

  // Build a uint128 from 1-16 bytes.
  // The 8 * len least significant bits of the result are taken from memory
  // in BigEndian order; the 128 - 8 * len most significant bits are set to 0.
  // NOTE(review): via Load64VariableLength above, this reads from fixed
  // 8-byte windows at p and p + 8 regardless of len, so the caller appears
  // to need 16 readable bytes at p -- confirm against call sites.
  static uint128 Load128VariableLength(const void *p, int len) {
    if (len <= 8) {
      return uint128(Load64VariableLength(static_cast<const char *>(p)+8,
                                          len));
    } else {
      return uint128(
          Load64VariableLength(p, len-8),
          Load64(static_cast<const char *>(p)+8));
    }
  }

  // Load & Store in machine's word size.
  static uword_t LoadUnsignedWord(const void *p) {
    if (sizeof(uword_t) == 8)
      return Load64(p);
    else
      return Load32(p);
  }

  static void StoreUnsignedWord(void *p, uword_t v) {
    if (sizeof(uword_t) == 8)
      Store64(p, v);
    else
      Store32(p, static_cast<uint32_t>(v));
  }
};  // BigEndian
368 | | |
// Network byte order is big-endian; expose BigEndian under that name for
// readability at call sites that deal with wire formats.
typedef BigEndian NetworkByteOrder;
371 | | |
372 | | namespace yb { |
373 | | namespace internal { |
374 | | |
// Maps an integer width in bytes (1, 2, 4, or 8) to the corresponding
// Load/Store pair of the given Endian policy class (LittleEndian or
// BigEndian). Used by yb::Load / yb::Store below to dispatch on sizeof(T).
template <size_t size, class Endian>
struct EndianHelper;

// 8-byte values.
template <class Endian>
struct EndianHelper<8, Endian> {
  static uint64_t Load(const void* p) {
    return Endian::Load64(p);
  }

  static void Store(void* p, uint64_t v) {
    Endian::Store64(p, v);
  }
};

// 4-byte values.
template <class Endian>
struct EndianHelper<4, Endian> {
  static uint32_t Load(const void* p) {
    return Endian::Load32(p);
  }

  static void Store(void* p, uint32_t v) {
    Endian::Store32(p, v);
  }
};

// 2-byte values.
template <class Endian>
struct EndianHelper<2, Endian> {
  static uint16_t Load(const void* p) {
    return Endian::Load16(p);
  }

  static void Store(void* p, uint16_t v) {
    Endian::Store16(p, v);
  }
};

// Single bytes have no byte order; read/write the byte directly.
template <class Endian>
struct EndianHelper<1, Endian> {
  static uint8_t Load(const void* p) {
    return *reinterpret_cast<const uint8_t *>(p);
  }

  static void Store(void* p, uint8_t v) {
    *reinterpret_cast<uint8_t *>(p) = v;
  }
};
421 | | |
422 | | } // namespace internal |
423 | | |
424 | | template <class T, class Endian> |
425 | 10.6M | T Load(const void* p) { |
426 | 10.6M | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); |
427 | 10.6M | } _ZN2yb4LoadINS_20TransactionErrorCodeE12LittleEndianEET_PKv Line | Count | Source | 425 | 1.55M | T Load(const void* p) { | 426 | 1.55M | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 1.55M | } |
_ZN2yb4LoadINS_6master18MasterErrorPB_CodeE12LittleEndianEET_PKv Line | Count | Source | 425 | 39.2k | T Load(const void* p) { | 426 | 39.2k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 39.2k | } |
_ZN2yb4LoadINS_2ql9ErrorCodeE12LittleEndianEET_PKv Line | Count | Source | 425 | 20.3k | T Load(const void* p) { | 426 | 20.3k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 20.3k | } |
_ZN2yb4LoadIi12LittleEndianEET_PKv Line | Count | Source | 425 | 741k | T Load(const void* p) { | 426 | 741k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 741k | } |
_ZN2yb4LoadIj9BigEndianEET_PKv Line | Count | Source | 425 | 5.86M | T Load(const void* p) { | 426 | 5.86M | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 5.86M | } |
_ZN2yb4LoadINS_13YBPgErrorCodeE12LittleEndianEET_PKv Line | Count | Source | 425 | 533k | T Load(const void* p) { | 426 | 533k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 533k | } |
_ZN2yb4LoadIx12LittleEndianEET_PKv Line | Count | Source | 425 | 34.5k | T Load(const void* p) { | 426 | 34.5k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 34.5k | } |
_ZN2yb4LoadIy12LittleEndianEET_PKv Line | Count | Source | 425 | 23.1k | T Load(const void* p) { | 426 | 23.1k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 23.1k | } |
_ZN2yb4LoadINS_3cdc15CDCErrorPB_CodeE12LittleEndianEET_PKv Line | Count | Source | 425 | 1 | T Load(const void* p) { | 426 | 1 | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 1 | } |
_ZN2yb4LoadINS_7tserver24TabletServerErrorPB_CodeE12LittleEndianEET_PKv Line | Count | Source | 425 | 183k | T Load(const void* p) { | 426 | 183k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 183k | } |
_ZN2yb4LoadINS_6tablet16RaftGroupStatePBE12LittleEndianEET_PKv Line | Count | Source | 425 | 48.4k | T Load(const void* p) { | 426 | 48.4k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 48.4k | } |
_ZN2yb4LoadINS_9consensus21ConsensusErrorPB_CodeE12LittleEndianEET_PKv Line | Count | Source | 425 | 5.56k | T Load(const void* p) { | 426 | 5.56k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 5.56k | } |
_ZN2yb4LoadIN7rocksdb11TimeoutCodeE12LittleEndianEET_PKv Line | Count | Source | 425 | 1 | T Load(const void* p) { | 426 | 1 | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 1 | } |
_ZN2yb4LoadINS_29PgsqlResponsePB_RequestStatusE12LittleEndianEET_PKv Line | Count | Source | 425 | 2.07k | T Load(const void* p) { | 426 | 2.07k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 2.07k | } |
_ZN2yb4LoadINS_6client15ClientErrorCodeE12LittleEndianEET_PKv Line | Count | Source | 425 | 1.02k | T Load(const void* p) { | 426 | 1.02k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 1.02k | } |
_ZN2yb4LoadINS_3rpc28ErrorStatusPB_RpcErrorCodePBE12LittleEndianEET_PKv Line | Count | Source | 425 | 179 | T Load(const void* p) { | 426 | 179 | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 179 | } |
_ZN2yb4LoadIm12LittleEndianEET_PKv Line | Count | Source | 425 | 1.02k | T Load(const void* p) { | 426 | 1.02k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 1.02k | } |
_ZN2yb4LoadIh9BigEndianEET_PKv Line | Count | Source | 425 | 136k | T Load(const void* p) { | 426 | 136k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 136k | } |
_ZN2yb4LoadIt9BigEndianEET_PKv Line | Count | Source | 425 | 71.7k | T Load(const void* p) { | 426 | 71.7k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 71.7k | } |
_ZN2yb4LoadIy9BigEndianEET_PKv Line | Count | Source | 425 | 1.40M | T Load(const void* p) { | 426 | 1.40M | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 1.40M | } |
|
428 | | |
429 | | template <class T, class Endian> |
430 | 3.57M | void Store(void *p, T v) { |
431 | 3.57M | typedef typename std::make_unsigned<T>::type UnsignedT; |
432 | 3.57M | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); |
433 | 3.57M | } _ZN2yb5StoreINS_20TransactionErrorCodeE12LittleEndianEEvPvT_ Line | Count | Source | 430 | 356k | void Store(void *p, T v) { | 431 | 356k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 356k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 356k | } |
_ZN2yb5StoreINS_6master18MasterErrorPB_CodeE12LittleEndianEEvPvT_ Line | Count | Source | 430 | 18.8k | void Store(void *p, T v) { | 431 | 18.8k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 18.8k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 18.8k | } |
_ZN2yb5StoreINS_2ql9ErrorCodeE12LittleEndianEEvPvT_ Line | Count | Source | 430 | 12.3k | void Store(void *p, T v) { | 431 | 12.3k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 12.3k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 12.3k | } |
_ZN2yb5StoreIi12LittleEndianEEvPvT_ Line | Count | Source | 430 | 2.81M | void Store(void *p, T v) { | 431 | 2.81M | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 2.81M | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 2.81M | } |
_ZN2yb5StoreINS_13YBPgErrorCodeE12LittleEndianEEvPvT_ Line | Count | Source | 430 | 167k | void Store(void *p, T v) { | 431 | 167k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 167k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 167k | } |
_ZN2yb5StoreIx12LittleEndianEEvPvT_ Line | Count | Source | 430 | 5.49k | void Store(void *p, T v) { | 431 | 5.49k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 5.49k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 5.49k | } |
_ZN2yb5StoreINS_3cdc15CDCErrorPB_CodeE12LittleEndianEEvPvT_ Line | Count | Source | 430 | 1 | void Store(void *p, T v) { | 431 | 1 | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 1 | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 1 | } |
_ZN2yb5StoreINS_7tserver24TabletServerErrorPB_CodeE12LittleEndianEEvPvT_ Line | Count | Source | 430 | 141k | void Store(void *p, T v) { | 431 | 141k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 141k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 141k | } |
_ZN2yb5StoreINS_6tablet16RaftGroupStatePBE12LittleEndianEEvPvT_ Line | Count | Source | 430 | 49.2k | void Store(void *p, T v) { | 431 | 49.2k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 49.2k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 49.2k | } |
_ZN2yb5StoreINS_9consensus21ConsensusErrorPB_CodeE12LittleEndianEEvPvT_ Line | Count | Source | 430 | 12 | void Store(void *p, T v) { | 431 | 12 | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 12 | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 12 | } |
_ZN2yb5StoreIN7rocksdb11TimeoutCodeE12LittleEndianEEvPvT_ Line | Count | Source | 430 | 176 | void Store(void *p, T v) { | 431 | 176 | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 176 | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 176 | } |
_ZN2yb5StoreINS_29PgsqlResponsePB_RequestStatusE12LittleEndianEEvPvT_ Line | Count | Source | 430 | 1.03k | void Store(void *p, T v) { | 431 | 1.03k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 1.03k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 1.03k | } |
_ZN2yb5StoreIm12LittleEndianEEvPvT_ Line | Count | Source | 430 | 1.03k | void Store(void *p, T v) { | 431 | 1.03k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 1.03k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 1.03k | } |
_ZN2yb5StoreINS_6client15ClientErrorCodeE12LittleEndianEEvPvT_ Line | Count | Source | 430 | 3 | void Store(void *p, T v) { | 431 | 3 | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 3 | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 3 | } |
_ZN2yb5StoreINS_3rpc28ErrorStatusPB_RpcErrorCodePBE12LittleEndianEEvPvT_ Line | Count | Source | 430 | 1.27k | void Store(void *p, T v) { | 431 | 1.27k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 1.27k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 1.27k | } |
Unexecuted instantiation: _ZN2yb5StoreINS_32WireProtocolTabletServerErrorTag5ValueE12LittleEndianEEvPvT_ _ZN2yb5StoreIy12LittleEndianEEvPvT_ Line | Count | Source | 430 | 392 | void Store(void *p, T v) { | 431 | 392 | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 392 | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 392 | } |
|
434 | | |
435 | | } // namespace yb |
436 | | |
437 | | #endif // YB_GUTIL_ENDIAN_H |