/Users/deen/code/yugabyte-db/src/yb/gutil/endian.h
Line | Count | Source (jump to first uncovered line) |
1 | | // Copyright 2005 Google Inc. |
2 | | // |
3 | | // Licensed to the Apache Software Foundation (ASF) under one |
4 | | // or more contributor license agreements. See the NOTICE file |
5 | | // distributed with this work for additional information |
6 | | // regarding copyright ownership. The ASF licenses this file |
7 | | // to you under the Apache License, Version 2.0 (the |
8 | | // "License"); you may not use this file except in compliance |
9 | | // with the License. You may obtain a copy of the License at |
10 | | // |
11 | | // http://www.apache.org/licenses/LICENSE-2.0 |
12 | | // |
13 | | // Unless required by applicable law or agreed to in writing, |
14 | | // software distributed under the License is distributed on an |
15 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
16 | | // KIND, either express or implied. See the License for the |
17 | | // specific language governing permissions and limitations |
18 | | // under the License. |
19 | | // |
20 | | // The following only applies to changes made to this file as part of YugaByte development. |
21 | | // |
22 | | // Portions Copyright (c) YugaByte, Inc. |
23 | | // |
24 | | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except |
25 | | // in compliance with the License. You may obtain a copy of the License at |
26 | | // |
27 | | // http://www.apache.org/licenses/LICENSE-2.0 |
28 | | // |
29 | | // Unless required by applicable law or agreed to in writing, software distributed under the License |
30 | | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express |
31 | | // or implied. See the License for the specific language governing permissions and limitations |
32 | | // under the License. |
33 | | // |
34 | | // --- |
35 | | // |
36 | | // |
37 | | // Utility functions that depend on bytesex. We define htonll and ntohll, |
38 | | // as well as "Google" versions of all the standards: ghtonl, ghtons, and |
39 | | // so on. These functions do exactly the same as their standard variants, |
40 | | // but don't require including the dangerous netinet/in.h. |
41 | | // |
42 | | // Buffer routines will copy to and from buffers without causing |
43 | | // a bus error when the architecture requires different byte alignments |
44 | | #ifndef YB_GUTIL_ENDIAN_H |
45 | | #define YB_GUTIL_ENDIAN_H |
46 | | |
47 | | #include <assert.h> |
48 | | |
49 | | #include "yb/gutil/int128.h" |
50 | | #include "yb/gutil/integral_types.h" |
51 | | #include "yb/gutil/port.h" |
52 | | |
// Byte-swaps a 64-bit integer, i.e. converts between its big-endian and
// little-endian representations. Picks the fastest primitive available
// for the target platform.
inline uint64 gbswap_64(uint64 host_int) {
#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__)
  // Adapted from /usr/include/byteswap.h. Not available on Mac.
  if (__builtin_constant_p(host_int)) {
    // Compile-time constant: let the compiler fold the swap.
    return __bswap_constant_64(host_int);
  } else {
    uint64 result;
    // Single BSWAP instruction on x86-64.
    __asm__("bswap %0" : "=r" (result) : "0" (host_int));
    return result;
  }
#elif defined(bswap_64)
  return bswap_64(host_int);
#else
  // Portable fallback: byte-swap each 32-bit half, then exchange the halves.
  return static_cast<uint64>(bswap_32(static_cast<uint32>(host_int >> 32))) |
      (static_cast<uint64>(bswap_32(static_cast<uint32>(host_int))) << 32);
#endif  // bswap_64
}
70 | | |
#ifdef IS_LITTLE_ENDIAN

// Definitions for ntohl etc. that don't require us to include
// netinet/in.h. We wrap bswap_32 and bswap_16 in functions rather
// than just #defining them because in debug mode, gcc doesn't
// correctly handle the (rather involved) definitions of bswap_32.
// gcc guarantees that inline functions are as fast as macros, so
// this isn't a performance hit.
inline uint16 ghtons(uint16 x) { return bswap_16(x); }
inline uint32 ghtonl(uint32 x) { return bswap_32(x); }
inline uint64 ghtonll(uint64 x) { return gbswap_64(x); }

#elif defined IS_BIG_ENDIAN

// These definitions are simpler on big-endian machines.
// These are functions instead of macros to avoid self-assignment warnings
// on calls such as "i = ghtonl(i);". This also provides type checking.
inline uint16 ghtons(uint16 x) { return x; }
inline uint32 ghtonl(uint32 x) { return x; }
inline uint64 ghtonll(uint64 x) { return x; }

#else
#error "Unsupported bytesex: Either IS_BIG_ENDIAN or IS_LITTLE_ENDIAN must be defined"  // NOLINT
#endif // bytesex


// ntoh* and hton* are the same thing for any size and bytesex,
// since the function is an involution, i.e., its own inverse.
#define gntohl(x) ghtonl(x)
#define gntohs(x) ghtons(x)
#define gntohll(x) ghtonll(x)
#if !defined(__APPLE__)
// This one is safe to take as it's an extension; Apple's headers already
// provide htonll/ntohll, so defining them there would collide.
#define htonll(x) ghtonll(x)
#define ntohll(x) htonll(x)
#endif
107 | | |
108 | | // Utilities to convert numbers between the current hosts's native byte |
109 | | // order and little-endian byte order |
110 | | // |
111 | | // Load/Store methods are alignment safe |
112 | | class LittleEndian { |
113 | | public: |
114 | | // Conversion functions. |
115 | | #ifdef IS_LITTLE_ENDIAN |
116 | | |
117 | 0 | static uint16 FromHost16(uint16 x) { return x; } |
118 | 0 | static uint16 ToHost16(uint16 x) { return x; } |
119 | | |
120 | 7.80M | static uint32 FromHost32(uint32 x) { return x; } |
121 | 224M | static uint32 ToHost32(uint32 x) { return x; } |
122 | | |
123 | 39.3M | static uint64 FromHost64(uint64 x) { return x; } |
124 | 113M | static uint64 ToHost64(uint64 x) { return x; } |
125 | | |
126 | 0 | static bool IsLittleEndian() { return true; } |
127 | | |
128 | | #elif defined IS_BIG_ENDIAN |
129 | | |
130 | | static uint16 FromHost16(uint16 x) { return bswap_16(x); } |
131 | | static uint16 ToHost16(uint16 x) { return bswap_16(x); } |
132 | | |
133 | | static uint32 FromHost32(uint32 x) { return bswap_32(x); } |
134 | | static uint32 ToHost32(uint32 x) { return bswap_32(x); } |
135 | | |
136 | | static uint64 FromHost64(uint64 x) { return gbswap_64(x); } |
137 | | static uint64 ToHost64(uint64 x) { return gbswap_64(x); } |
138 | | |
139 | | static bool IsLittleEndian() { return false; } |
140 | | |
141 | | #endif /* ENDIAN */ |
142 | | |
143 | | // Functions to do unaligned loads and stores in little-endian order. |
144 | 0 | static uint16 Load16(const void *p) { |
145 | 0 | return ToHost16(UNALIGNED_LOAD16(p)); |
146 | 0 | } |
147 | | |
148 | 0 | static void Store16(void *p, uint16 v) { |
149 | 0 | UNALIGNED_STORE16(p, FromHost16(v)); |
150 | 0 | } |
151 | | |
152 | 224M | static uint32 Load32(const void *p) { |
153 | 224M | return ToHost32(UNALIGNED_LOAD32(p)); |
154 | 224M | } |
155 | | |
156 | 7.80M | static void Store32(void *p, uint32 v) { |
157 | 7.80M | UNALIGNED_STORE32(p, FromHost32(v)); |
158 | 7.80M | } |
159 | | |
160 | 107M | static uint64 Load64(const void *p) { |
161 | 107M | return ToHost64(UNALIGNED_LOAD64(p)); |
162 | 107M | } |
163 | | |
164 | | // Build a uint64 from 1-8 bytes. |
165 | | // 8 * len least significant bits are loaded from the memory with |
166 | | // LittleEndian order. The 64 - 8 * len most significant bits are |
167 | | // set all to 0. |
168 | | // In latex-friendly words, this function returns: |
169 | | // $\sum_{i=0}^{len-1} p[i] 256^{i}$, where p[i] is unsigned. |
170 | | // |
171 | | // This function is equivalent with: |
172 | | // uint64 val = 0; |
173 | | // memcpy(&val, p, len); |
174 | | // return ToHost64(val); |
175 | | // TODO(user): write a small benchmark and benchmark the speed |
176 | | // of a memcpy based approach. |
177 | | // |
178 | | // For speed reasons this function does not work for len == 0. |
179 | | // The caller needs to guarantee that 1 <= len <= 8. |
180 | 0 | static uint64 Load64VariableLength(const void * const p, int len) { |
181 | 0 | assert(len >= 1 && len <= 8); |
182 | 0 | const char * const buf = static_cast<const char *>(p); |
183 | 0 | uint64 val = 0; |
184 | 0 | --len; |
185 | 0 | do { |
186 | 0 | val = (val << 8) | buf[len]; |
187 | 0 | // (--len >= 0) is about 10 % faster than (len--) in some benchmarks. |
188 | 0 | } while (--len >= 0); |
189 | 0 | // No ToHost64(...) needed. The bytes are accessed in little-endian manner |
190 | 0 | // on every architecture. |
191 | 0 | return val; |
192 | 0 | } |
193 | | |
194 | 31.4k | static void Store64(void *p, uint64 v) { |
195 | 31.4k | UNALIGNED_STORE64(p, FromHost64(v)); |
196 | 31.4k | } |
197 | | |
198 | 0 | static uint128 Load128(const void *p) { |
199 | 0 | return uint128( |
200 | 0 | ToHost64(UNALIGNED_LOAD64(reinterpret_cast<const uint64 *>(p) + 1)), |
201 | 0 | ToHost64(UNALIGNED_LOAD64(p))); |
202 | 0 | } |
203 | | |
204 | 0 | static void Store128(void *p, const uint128 v) { |
205 | 0 | UNALIGNED_STORE64(p, FromHost64(Uint128Low64(v))); |
206 | 0 | UNALIGNED_STORE64(reinterpret_cast<uint64 *>(p) + 1, |
207 | 0 | FromHost64(Uint128High64(v))); |
208 | 0 | } |
209 | | |
210 | | // Build a uint128 from 1-16 bytes. |
211 | | // 8 * len least significant bits are loaded from the memory with |
212 | | // LittleEndian order. The 128 - 8 * len most significant bits are |
213 | | // set all to 0. |
214 | 0 | static uint128 Load128VariableLength(const void *p, int len) { |
215 | 0 | if (len <= 8) { |
216 | 0 | return uint128(Load64VariableLength(p, len)); |
217 | 0 | } else { |
218 | 0 | return uint128( |
219 | 0 | Load64VariableLength(static_cast<const char *>(p) + 8, len - 8), |
220 | 0 | Load64(p)); |
221 | 0 | } |
222 | 0 | } |
223 | | |
224 | | // Load & Store in machine's word size. |
225 | 0 | static uword_t LoadUnsignedWord(const void *p) { |
226 | 0 | if (sizeof(uword_t) == 8) |
227 | 0 | return Load64(p); |
228 | 0 | else |
229 | 0 | return Load32(p); |
230 | 0 | } |
231 | | |
232 | 0 | static void StoreUnsignedWord(void *p, uword_t v) { |
233 | 0 | if (sizeof(v) == 8) |
234 | 0 | Store64(p, v); |
235 | 0 | else |
236 | 0 | Store32(p, static_cast<uint32_t>(v)); |
237 | 0 | } |
238 | | }; |
239 | | |
240 | | // Utilities to convert numbers between the current hosts's native byte |
241 | | // order and big-endian byte order (same as network byte order) |
242 | | // |
243 | | // Load/Store methods are alignment safe |
244 | | class BigEndian { |
245 | | public: |
246 | | #ifdef IS_LITTLE_ENDIAN |
247 | | |
248 | 193M | static uint16 FromHost16(uint16 x) { return bswap_1696.5M (x); } BigEndian::FromHost16(unsigned short) Line | Count | Source | 248 | 96.5M | static uint16 FromHost16(uint16 x) { return bswap_16(x); } |
BigEndian::FromHost16(unsigned short) Line | Count | Source | 248 | 96.5M | static uint16 FromHost16(uint16 x) { return bswap_16(x); } |
|
249 | 1.13G | static uint16 ToHost16(uint16 x) { return bswap_16568M (x); } BigEndian::ToHost16(unsigned short) Line | Count | Source | 249 | 568M | static uint16 ToHost16(uint16 x) { return bswap_16(x); } |
BigEndian::ToHost16(unsigned short) Line | Count | Source | 249 | 568M | static uint16 ToHost16(uint16 x) { return bswap_16(x); } |
|
250 | | |
251 | 1.46G | static uint32 FromHost32(uint32 x) { return bswap_32731M (x); } BigEndian::FromHost32(unsigned int) Line | Count | Source | 251 | 731M | static uint32 FromHost32(uint32 x) { return bswap_32(x); } |
BigEndian::FromHost32(unsigned int) Line | Count | Source | 251 | 731M | static uint32 FromHost32(uint32 x) { return bswap_32(x); } |
|
252 | 3.20G | static uint32 ToHost32(uint32 x) { return bswap_321.60G (x); } BigEndian::ToHost32(unsigned int) Line | Count | Source | 252 | 1.60G | static uint32 ToHost32(uint32 x) { return bswap_32(x); } |
BigEndian::ToHost32(unsigned int) Line | Count | Source | 252 | 1.60G | static uint32 ToHost32(uint32 x) { return bswap_32(x); } |
|
253 | | |
254 | 137M | static uint64 FromHost64(uint64 x) { return gbswap_64(x); } |
255 | 199M | static uint64 ToHost64(uint64 x) { return gbswap_64(x); } |
256 | | |
257 | 0 | static bool IsLittleEndian() { return true; } |
258 | | |
259 | | #elif defined IS_BIG_ENDIAN |
260 | | |
261 | | static uint16 FromHost16(uint16 x) { return x; } |
262 | | static uint16 ToHost16(uint16 x) { return x; } |
263 | | |
264 | | static uint32 FromHost32(uint32 x) { return x; } |
265 | | static uint32 ToHost32(uint32 x) { return x; } |
266 | | |
267 | | static uint64 FromHost64(uint64 x) { return x; } |
268 | | static uint64 ToHost64(uint64 x) { return x; } |
269 | | |
270 | | static bool IsLittleEndian() { return false; } |
271 | | |
272 | | #endif /* ENDIAN */ |
273 | | // Functions to do unaligned loads and stores in little-endian order. |
274 | 567M | static uint16 Load16(const void *p) { |
275 | 567M | return ToHost16(UNALIGNED_LOAD16(p)); |
276 | 567M | } |
277 | | |
278 | 96.5M | static void Store16(void *p, uint16 v) { |
279 | 96.5M | UNALIGNED_STORE16(p, FromHost16(v)); |
280 | 96.5M | } |
281 | | |
282 | 1.60G | static uint32 Load32(const void *p) { |
283 | 1.60G | return ToHost32(UNALIGNED_LOAD32(p)); |
284 | 1.60G | } |
285 | | |
286 | 667M | static void Store32(void *p, uint32 v) { |
287 | 667M | UNALIGNED_STORE32(p, FromHost32(v)); |
288 | 667M | } |
289 | | |
290 | 199M | static uint64 Load64(const void *p) { |
291 | 199M | return ToHost64(UNALIGNED_LOAD64(p)); |
292 | 199M | } |
293 | | |
294 | | // Build a uint64 from 1-8 bytes. |
295 | | // 8 * len least significant bits are loaded from the memory with |
296 | | // BigEndian order. The 64 - 8 * len most significant bits are |
297 | | // set all to 0. |
298 | | // In latex-friendly words, this function returns: |
299 | | // $\sum_{i=0}^{len-1} p[i] 256^{i}$, where p[i] is unsigned. |
300 | | // |
301 | | // This function is equivalent with: |
302 | | // uint64 val = 0; |
303 | | // memcpy(&val, p, len); |
304 | | // return ToHost64(val); |
305 | | // TODO(user): write a small benchmark and benchmark the speed |
306 | | // of a memcpy based approach. |
307 | | // |
308 | | // For speed reasons this function does not work for len == 0. |
309 | | // The caller needs to guarantee that 1 <= len <= 8. |
310 | 0 | static uint64 Load64VariableLength(const void * const p, int len) { |
311 | 0 | assert(len >= 1 && len <= 8); |
312 | 0 | uint64 val = Load64(p); |
313 | 0 | uint64 mask = 0; |
314 | 0 | --len; |
315 | 0 | do { |
316 | 0 | mask = (mask << 8) | 0xff; |
317 | 0 | // (--len >= 0) is about 10 % faster than (len--) in some benchmarks. |
318 | 0 | } while (--len >= 0); |
319 | 0 | return val & mask; |
320 | 0 | } |
321 | | |
322 | 137M | static void Store64(void *p, uint64 v) { |
323 | 137M | UNALIGNED_STORE64(p, FromHost64(v)); |
324 | 137M | } |
325 | | |
326 | 0 | static uint128 Load128(const void *p) { |
327 | 0 | return uint128( |
328 | 0 | ToHost64(UNALIGNED_LOAD64(p)), |
329 | 0 | ToHost64(UNALIGNED_LOAD64(reinterpret_cast<const uint64 *>(p) + 1))); |
330 | 0 | } |
331 | | |
332 | 0 | static void Store128(void *p, const uint128 v) { |
333 | 0 | UNALIGNED_STORE64(p, FromHost64(Uint128High64(v))); |
334 | 0 | UNALIGNED_STORE64(reinterpret_cast<uint64 *>(p) + 1, |
335 | 0 | FromHost64(Uint128Low64(v))); |
336 | 0 | } |
337 | | |
338 | | // Build a uint128 from 1-16 bytes. |
339 | | // 8 * len least significant bits are loaded from the memory with |
340 | | // BigEndian order. The 128 - 8 * len most significant bits are |
341 | | // set all to 0. |
342 | 0 | static uint128 Load128VariableLength(const void *p, int len) { |
343 | 0 | if (len <= 8) { |
344 | 0 | return uint128(Load64VariableLength(static_cast<const char *>(p)+8, |
345 | 0 | len)); |
346 | 0 | } else { |
347 | 0 | return uint128( |
348 | 0 | Load64VariableLength(p, len-8), |
349 | 0 | Load64(static_cast<const char *>(p)+8)); |
350 | 0 | } |
351 | 0 | } |
352 | | |
353 | | // Load & Store in machine's word size. |
354 | 0 | static uword_t LoadUnsignedWord(const void *p) { |
355 | 0 | if (sizeof(uword_t) == 8) |
356 | 0 | return Load64(p); |
357 | 0 | else |
358 | 0 | return Load32(p); |
359 | 0 | } |
360 | | |
361 | 0 | static void StoreUnsignedWord(void *p, uword_t v) { |
362 | 0 | if (sizeof(uword_t) == 8) |
363 | 0 | Store64(p, v); |
364 | 0 | else |
365 | 0 | Store32(p, static_cast<uint32_t>(v)); |
366 | 0 | } |
367 | | }; // BigEndian |
368 | | |
369 | | // Network byte order is big-endian |
370 | | typedef BigEndian NetworkByteOrder; |
371 | | |
372 | | namespace yb { |
373 | | namespace internal { |
374 | | |
// Maps an integer width (in bytes) to the matching Load/Store pair of the
// given Endian policy class (LittleEndian or BigEndian). Used by yb::Load
// and yb::Store below to select the right primitive from sizeof(T).
template <size_t size, class Endian>
struct EndianHelper;

// 8-byte integers: forward to Load64/Store64.
template <class Endian>
struct EndianHelper<8, Endian> {
  static uint64_t Load(const void* p) {
    return Endian::Load64(p);
  }

  static void Store(void* p, uint64_t v) {
    Endian::Store64(p, v);
  }
};

// 4-byte integers: forward to Load32/Store32.
template <class Endian>
struct EndianHelper<4, Endian> {
  static uint32_t Load(const void* p) {
    return Endian::Load32(p);
  }

  static void Store(void* p, uint32_t v) {
    Endian::Store32(p, v);
  }
};

// 2-byte integers: forward to Load16/Store16.
template <class Endian>
struct EndianHelper<2, Endian> {
  static uint16_t Load(const void* p) {
    return Endian::Load16(p);
  }

  static void Store(void* p, uint16_t v) {
    Endian::Store16(p, v);
  }
};

// Single bytes have no byte order, so load/store directly.
template <class Endian>
struct EndianHelper<1, Endian> {
  static uint8_t Load(const void* p) {
    return *reinterpret_cast<const uint8_t *>(p);
  }

  static void Store(void* p, uint8_t v) {
    *reinterpret_cast<uint8_t *>(p) = v;
  }
};
421 | | |
422 | | } // namespace internal |
423 | | |
424 | | template <class T, class Endian> |
425 | 21.5M | T Load(const void* p) { |
426 | 21.5M | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); |
427 | 21.5M | } yb::TransactionErrorCode yb::Load<yb::TransactionErrorCode, LittleEndian>(void const*) Line | Count | Source | 425 | 3.07M | T Load(const void* p) { | 426 | 3.07M | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 3.07M | } |
yb::YBPgErrorCode yb::Load<yb::YBPgErrorCode, LittleEndian>(void const*) Line | Count | Source | 425 | 1.19M | T Load(const void* p) { | 426 | 1.19M | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 1.19M | } |
yb::PgsqlResponsePB_RequestStatus yb::Load<yb::PgsqlResponsePB_RequestStatus, LittleEndian>(void const*) Line | Count | Source | 425 | 26.6k | T Load(const void* p) { | 426 | 26.6k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 26.6k | } |
long long yb::Load<long long, LittleEndian>(void const*) Line | Count | Source | 425 | 34.6k | T Load(const void* p) { | 426 | 34.6k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 34.6k | } |
yb::consensus::ConsensusErrorPB_Code yb::Load<yb::consensus::ConsensusErrorPB_Code, LittleEndian>(void const*) Line | Count | Source | 425 | 7.09k | T Load(const void* p) { | 426 | 7.09k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 7.09k | } |
yb::tserver::TabletServerErrorPB_Code yb::Load<yb::tserver::TabletServerErrorPB_Code, LittleEndian>(void const*) Line | Count | Source | 425 | 260k | T Load(const void* p) { | 426 | 260k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 260k | } |
yb::client::ClientErrorCode yb::Load<yb::client::ClientErrorCode, LittleEndian>(void const*) Line | Count | Source | 425 | 4.03k | T Load(const void* p) { | 426 | 4.03k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 4.03k | } |
unsigned int yb::Load<unsigned int, BigEndian>(void const*) Line | Count | Source | 425 | 10.1M | T Load(const void* p) { | 426 | 10.1M | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 10.1M | } |
yb::rpc::ErrorStatusPB_RpcErrorCodePB yb::Load<yb::rpc::ErrorStatusPB_RpcErrorCodePB, LittleEndian>(void const*) Line | Count | Source | 425 | 18.3k | T Load(const void* p) { | 426 | 18.3k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 18.3k | } |
int yb::Load<int, LittleEndian>(void const*) Line | Count | Source | 425 | 3.90M | T Load(const void* p) { | 426 | 3.90M | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 3.90M | } |
yb::tablet::RaftGroupStatePB yb::Load<yb::tablet::RaftGroupStatePB, LittleEndian>(void const*) Line | Count | Source | 425 | 73.1k | T Load(const void* p) { | 426 | 73.1k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 73.1k | } |
yb::master::MasterErrorPB_Code yb::Load<yb::master::MasterErrorPB_Code, LittleEndian>(void const*) Line | Count | Source | 425 | 45.2k | T Load(const void* p) { | 426 | 45.2k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 45.2k | } |
yb::cdc::CDCErrorPB_Code yb::Load<yb::cdc::CDCErrorPB_Code, LittleEndian>(void const*) Line | Count | Source | 425 | 1 | T Load(const void* p) { | 426 | 1 | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 1 | } |
unsigned long yb::Load<unsigned long, LittleEndian>(void const*) Line | Count | Source | 425 | 11.6k | T Load(const void* p) { | 426 | 11.6k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 11.6k | } |
yb::ql::ErrorCode yb::Load<yb::ql::ErrorCode, LittleEndian>(void const*) Line | Count | Source | 425 | 20.3k | T Load(const void* p) { | 426 | 20.3k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 20.3k | } |
unsigned long long yb::Load<unsigned long long, LittleEndian>(void const*) Line | Count | Source | 425 | 28.1k | T Load(const void* p) { | 426 | 28.1k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 28.1k | } |
unsigned char yb::Load<unsigned char, BigEndian>(void const*) Line | Count | Source | 425 | 172k | T Load(const void* p) { | 426 | 172k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 172k | } |
unsigned short yb::Load<unsigned short, BigEndian>(void const*) Line | Count | Source | 425 | 71.9k | T Load(const void* p) { | 426 | 71.9k | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 71.9k | } |
unsigned long long yb::Load<unsigned long long, BigEndian>(void const*) Line | Count | Source | 425 | 2.42M | T Load(const void* p) { | 426 | 2.42M | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 2.42M | } |
rocksdb::TimeoutCode yb::Load<rocksdb::TimeoutCode, LittleEndian>(void const*) Line | Count | Source | 425 | 1 | T Load(const void* p) { | 426 | 1 | return static_cast<T>(internal::EndianHelper<sizeof(T), Endian>::Load(p)); | 427 | 1 | } |
|
428 | | |
429 | | template <class T, class Endian> |
430 | 7.83M | void Store(void *p, T v) { |
431 | 7.83M | typedef typename std::make_unsigned<T>::type UnsignedT; |
432 | 7.83M | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); |
433 | 7.83M | } void yb::Store<yb::ql::ErrorCode, LittleEndian>(void*, yb::ql::ErrorCode) Line | Count | Source | 430 | 12.3k | void Store(void *p, T v) { | 431 | 12.3k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 12.3k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 12.3k | } |
void yb::Store<yb::TransactionErrorCode, LittleEndian>(void*, yb::TransactionErrorCode) Line | Count | Source | 430 | 553k | void Store(void *p, T v) { | 431 | 553k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 553k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 553k | } |
void yb::Store<yb::YBPgErrorCode, LittleEndian>(void*, yb::YBPgErrorCode) Line | Count | Source | 430 | 283k | void Store(void *p, T v) { | 431 | 283k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 283k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 283k | } |
void yb::Store<yb::PgsqlResponsePB_RequestStatus, LittleEndian>(void*, yb::PgsqlResponsePB_RequestStatus) Line | Count | Source | 430 | 13.0k | void Store(void *p, T v) { | 431 | 13.0k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 13.0k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 13.0k | } |
void yb::Store<unsigned long, LittleEndian>(void*, unsigned long) Line | Count | Source | 430 | 13.0k | void Store(void *p, T v) { | 431 | 13.0k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 13.0k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 13.0k | } |
void yb::Store<yb::tserver::TabletServerErrorPB_Code, LittleEndian>(void*, yb::tserver::TabletServerErrorPB_Code) Line | Count | Source | 430 | 195k | void Store(void *p, T v) { | 431 | 195k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 195k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 195k | } |
void yb::Store<long long, LittleEndian>(void*, long long) Line | Count | Source | 430 | 5.49k | void Store(void *p, T v) { | 431 | 5.49k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 5.49k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 5.49k | } |
void yb::Store<yb::consensus::ConsensusErrorPB_Code, LittleEndian>(void*, yb::consensus::ConsensusErrorPB_Code) Line | Count | Source | 430 | 23 | void Store(void *p, T v) { | 431 | 23 | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 23 | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 23 | } |
void yb::Store<yb::tablet::RaftGroupStatePB, LittleEndian>(void*, yb::tablet::RaftGroupStatePB) Line | Count | Source | 430 | 74.5k | void Store(void *p, T v) { | 431 | 74.5k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 74.5k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 74.5k | } |
void yb::Store<yb::client::ClientErrorCode, LittleEndian>(void*, yb::client::ClientErrorCode) Line | Count | Source | 430 | 3 | void Store(void *p, T v) { | 431 | 3 | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 3 | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 3 | } |
void yb::Store<yb::rpc::ErrorStatusPB_RpcErrorCodePB, LittleEndian>(void*, yb::rpc::ErrorStatusPB_RpcErrorCodePB) Line | Count | Source | 430 | 266k | void Store(void *p, T v) { | 431 | 266k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 266k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 266k | } |
void yb::Store<int, LittleEndian>(void*, int) Line | Count | Source | 430 | 6.39M | void Store(void *p, T v) { | 431 | 6.39M | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 6.39M | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 6.39M | } |
void yb::Store<yb::master::MasterErrorPB_Code, LittleEndian>(void*, yb::master::MasterErrorPB_Code) Line | Count | Source | 430 | 22.1k | void Store(void *p, T v) { | 431 | 22.1k | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 22.1k | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 22.1k | } |
Unexecuted instantiation: void yb::Store<yb::WireProtocolTabletServerErrorTag::Value, LittleEndian>(void*, yb::WireProtocolTabletServerErrorTag::Value) void yb::Store<rocksdb::TimeoutCode, LittleEndian>(void*, rocksdb::TimeoutCode) Line | Count | Source | 430 | 176 | void Store(void *p, T v) { | 431 | 176 | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 176 | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 176 | } |
void yb::Store<yb::cdc::CDCErrorPB_Code, LittleEndian>(void*, yb::cdc::CDCErrorPB_Code) Line | Count | Source | 430 | 1 | void Store(void *p, T v) { | 431 | 1 | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 1 | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 1 | } |
void yb::Store<unsigned long long, LittleEndian>(void*, unsigned long long) Line | Count | Source | 430 | 477 | void Store(void *p, T v) { | 431 | 477 | typedef typename std::make_unsigned<T>::type UnsignedT; | 432 | 477 | internal::EndianHelper<sizeof(T), Endian>::Store(p, static_cast<UnsignedT>(v)); | 433 | 477 | } |
|
434 | | |
435 | | } // namespace yb |
436 | | |
437 | | #endif // YB_GUTIL_ENDIAN_H |