/Users/deen/code/yugabyte-db/src/yb/gutil/hash/jenkins_lookup2.h
Line | Count | Source (jump to first uncovered line) |
1 | | // Copyright 2011 Google Inc. All Rights Reserved. |
2 | | // |
3 | | // The following only applies to changes made to this file as part of YugaByte development. |
4 | | // |
5 | | // Portions Copyright (c) YugaByte, Inc. |
6 | | // |
7 | | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except |
8 | | // in compliance with the License. You may obtain a copy of the License at |
9 | | // |
10 | | // http://www.apache.org/licenses/LICENSE-2.0 |
11 | | // |
12 | | // Unless required by applicable law or agreed to in writing, software distributed under the License |
13 | | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express |
14 | | // or implied. See the License for the specific language governing permissions and limitations |
15 | | // under the License. |
16 | | // |
17 | | // Legacy implementation of the core Jenkins lookup2 algorithm. This is used in |
18 | | // many older hash functions which we are unable to remove or change due to the |
19 | | // values being recorded. New code should not use any of these routines and |
20 | | // should not include this header file. It pollutes the global namespace with |
21 | | // the 'mix' function. |
22 | | // |
23 | | // This file contains the basic hash "mix" code which is widely referenced. |
24 | | // |
25 | | // This file also contains routines used to load an unaligned little-endian |
26 | | // word from memory. This relatively generic functionality probably |
27 | | // shouldn't live in this file. |
28 | | |
29 | | #ifndef UTIL_HASH_JENKINS_LOOKUP2_H_ |
30 | | #define UTIL_HASH_JENKINS_LOOKUP2_H_ |
31 | | |
32 | | #include "yb/gutil/endian.h" |
33 | | #include "yb/gutil/integral_types.h" |
34 | | |
35 | | // ---------------------------------------------------------------------- |
36 | | // mix() |
37 | | // The hash function I use is due to Bob Jenkins (see |
38 | | // http://burtleburtle.net/bob/hash/index.html). |
39 | | // Each mix takes 36 instructions, in 18 cycles if you're lucky. |
40 | | // |
41 | | // On x86 architectures, this requires 45 instructions in 27 cycles, |
42 | | // if you're lucky. |
43 | | // ---------------------------------------------------------------------- |
44 | | |
// Core 32-bit mix step of Bob Jenkins' lookup2 hash
// (http://burtleburtle.net/bob/hash/index.html).
//
// Each of the nine statements is a reversible transform of (a, b, c);
// together they diffuse every input bit across all three words.
//
// WARNING: the exact statement order and the shift constants below are
// load-bearing. Hash values produced via this function have been recorded
// persistently (see the header comment), so no statement may be reordered
// or altered, ever.
static inline void mix(uint32& a, uint32& b, uint32& c) {     // 32bit version
  a -= b; a -= c; a ^= (c>>13);
  b -= c; b -= a; b ^= (a<<8);
  c -= a; c -= b; c ^= (b>>13);
  a -= b; a -= c; a ^= (c>>12);
  b -= c; b -= a; b ^= (a<<16);
  c -= a; c -= b; c ^= (b>>5);
  a -= b; a -= c; a ^= (c>>3);
  b -= c; b -= a; b ^= (a<<10);
  c -= a; c -= b; c ^= (b>>15);
}
56 | | |
// 64-bit analogue of the 32-bit mix() above (Jenkins' lookup2, 64-bit
// variant). Twelve reversible transforms instead of nine; the wider word
// needs the extra rounds to diffuse high-order bits.
//
// WARNING: as with the 32-bit version, the statement order and shift
// constants are frozen — recorded hash values depend on them exactly.
static inline void mix(uint64& a, uint64& b, uint64& c) {     // 64bit version
  a -= b; a -= c; a ^= (c>>43);
  b -= c; b -= a; b ^= (a<<9);
  c -= a; c -= b; c ^= (b>>8);
  a -= b; a -= c; a ^= (c>>38);
  b -= c; b -= a; b ^= (a<<23);
  c -= a; c -= b; c ^= (b>>5);
  a -= b; a -= c; a ^= (c>>35);
  b -= c; b -= a; b ^= (a<<49);
  c -= a; c -= b; c ^= (b>>11);
  a -= b; a -= c; a ^= (c>>12);
  b -= c; b -= a; b ^= (a<<18);
  c -= a; c -= b; c ^= (b>>22);
}
71 | | |
72 | | |
73 | | // Load an unaligned little endian word from memory. |
74 | | // |
75 | | // These routines are named Word32At(), Word64At() and Google1At(). |
76 | | // Long ago, the 32-bit version of this operation was implemented using |
77 | | // signed characters. The hash function that used this variant creates |
78 | | // persistent hash values. The hash routine needs to remain backwards |
79 | | // compatible, so we renamed the word loading function 'Google1At' to |
80 | | // make it clear this implements special functionality. |
81 | | // |
82 | | // If a machine has alignment constraints or is big endian, we must |
83 | | // load the word a byte at a time. Otherwise we can load the whole word |
84 | | // from memory. |
85 | | // |
86 | | // [Plausibly, Word32At() and Word64At() should really be called |
87 | | // UNALIGNED_LITTLE_ENDIAN_LOAD32() and UNALIGNED_LITTLE_ENDIAN_LOAD64() |
88 | | // but that seems overly verbose.] |
89 | | |
90 | 107M | static inline uint64 Word64At(const char *ptr) { |
91 | 107M | return LittleEndian::Load64(ptr); |
92 | 107M | } Unexecuted instantiation: hash.cc:Word64At(char const*) jenkins.cc:Word64At(char const*) Line | Count | Source | 90 | 107M | static inline uint64 Word64At(const char *ptr) { | 91 | 107M | return LittleEndian::Load64(ptr); | 92 | 107M | } |
Unexecuted instantiation: split.cc:Word64At(char const*) Unexecuted instantiation: stringpiece.cc:Word64At(char const*) Unexecuted instantiation: partition.cc:Word64At(char const*) Unexecuted instantiation: yb_partition.cc:Word64At(char const*) |
93 | | |
94 | 0 | static inline uint32 Word32At(const char *ptr) { |
95 | 0 | return LittleEndian::Load32(ptr); |
96 | 0 | } Unexecuted instantiation: hash.cc:Word32At(char const*) Unexecuted instantiation: jenkins.cc:Word32At(char const*) Unexecuted instantiation: split.cc:Word32At(char const*) Unexecuted instantiation: stringpiece.cc:Word32At(char const*) Unexecuted instantiation: partition.cc:Word32At(char const*) Unexecuted instantiation: yb_partition.cc:Word32At(char const*) |
97 | | |
98 | | // This produces the same results as the byte-by-byte version below. |
99 | | // Here, we mask off the sign bits and subtract off two copies. To |
100 | | // see why this is the same as adding together the sign extensions, |
101 | | // start by considering the low-order byte. If we loaded an unsigned |
102 | | // word and wanted to sign extend it, we isolate the sign bit and subtract |
103 | | // that from zero which gives us a sequence of bits matching the sign bit |
104 | | // at and above the sign bit. If we remove (subtract) the sign bit and |
105 | | // add in the low order byte, we now have a sign-extended byte as desired. |
106 | | // We can then operate on all four bytes in parallel because addition |
107 | | // is associative and commutative. |
108 | | // |
109 | | // For example, consider sign extending the bytes 0x01 and 0x81. For 0x01, |
110 | | // the sign bit is zero, and 0x01 - 0 -0 = 1. For 0x81, the sign bit is 1 |
111 | | // and we are computing 0x81 - 0x80 + (-0x80) == 0x01 + 0xFFFFFF80. |
112 | | // |
113 | | // Similarly, if we start with 0x8200 and want to sign extend that, |
114 | | // we end up calculating 0x8200 - 0x8000 + (-0x8000) == 0xFFFF8000 + 0x0200 |
115 | | // |
116 | | // Suppose we have two bytes at the same time. Doesn't the adding of all |
117 | | // those F's generate something weird? Ignore the F's and reassociate |
118 | | // the addition. For 0x8281, processing the bytes one at a time (like |
119 | | // we used to do) calculates |
120 | | // [0x8200 - 0x8000 + (-0x8000)] + [0x0081 - 0x80 + (-0x80)] |
121 | | // == 0x8281 - 0x8080 - 0x8000 - 0x80 |
122 | | // == 0x8281 - 0x8080 - 0x8080 |
123 | | // |
124 | | // Byte-by-byte version: |
125 | | // |
126 | | // static inline uint32 Google1At(const char *ptr2) { |
127 | | // const schar * ptr = reinterpret_cast<const schar *>(ptr2); |
128 | | // return (static_cast<schar>(ptr[0]) + |
129 | | // (static_cast<uint32>(ptr[1]) << 8) + |
130 | | // (static_cast<uint32>(ptr[2]) << 16) + |
131 | | // (static_cast<uint32>(ptr[3]) << 24)); |
132 | | // } |
133 | | |
134 | 215M | static inline uint32 Google1At(const char *ptr) { |
135 | 215M | uint32 t = LittleEndian::Load32(ptr); |
136 | 215M | uint32 masked = t & 0x80808080; |
137 | 215M | return t - masked - masked; |
138 | 215M | } Unexecuted instantiation: hash.cc:Google1At(char const*) jenkins.cc:Google1At(char const*) Line | Count | Source | 134 | 215M | static inline uint32 Google1At(const char *ptr) { | 135 | 215M | uint32 t = LittleEndian::Load32(ptr); | 136 | 215M | uint32 masked = t & 0x80808080; | 137 | 215M | return t - masked - masked; | 138 | 215M | } |
Unexecuted instantiation: split.cc:Google1At(char const*) Unexecuted instantiation: stringpiece.cc:Google1At(char const*) Unexecuted instantiation: partition.cc:Google1At(char const*) Unexecuted instantiation: yb_partition.cc:Google1At(char const*) |
139 | | |
140 | | // Historically, WORD_HASH has always been defined as we always run on |
141 | | // machines that don't NEED_ALIGNED_LOADS and which IS_LITTLE_ENDIAN. |
142 | | // |
143 | | // TODO(user): find occurrences of WORD_HASH and adjust the code to |
144 | | // use more meaningful concepts. |
145 | | # define WORD_HASH |
146 | | |
147 | | #endif // UTIL_HASH_JENKINS_LOOKUP2_H_ |