YugabyteDB (2.13.1.0-b60, 21121d69985fbf76aa6958d8f04a9bfa936293b5)

Coverage Report

Created: 2022-03-22 16:43

/Users/deen/code/yugabyte-db/src/yb/rocksdb/util/dynamic_bloom.h
Line
Count
Source (jump to first uncovered line)
1
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2
// This source code is licensed under the BSD-style license found in the
3
// LICENSE file in the root directory of this source tree. An additional grant
4
// of patent rights can be found in the PATENTS file in the same directory.
5
//
6
// The following only applies to changes made to this file as part of YugaByte development.
7
//
8
// Portions Copyright (c) YugaByte, Inc.
9
//
10
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
11
// in compliance with the License.  You may obtain a copy of the License at
12
//
13
// http://www.apache.org/licenses/LICENSE-2.0
14
//
15
// Unless required by applicable law or agreed to in writing, software distributed under the License
16
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
17
// or implied.  See the License for the specific language governing permissions and limitations
18
// under the License.
19
//
20
#ifndef ROCKSDB_UTIL_DYNAMIC_BLOOM_H
21
#define ROCKSDB_UTIL_DYNAMIC_BLOOM_H
22
23
#pragma once
24
25
#include <atomic>
26
#include <memory>
27
#include <string>
28
29
#include "yb/util/slice.h"
30
31
#include "yb/rocksdb/port/port.h"
32
33
namespace rocksdb {
34
35
class Allocator;
36
class Logger;
37
38
// An in-memory Bloom filter over 32-bit hashes. Bits live in data_ (allocated
// externally via an Allocator or supplied through SetRawData). Two layouts are
// supported: a flat bit array, or a cache-line-blocked layout (kNumBlocks > 0)
// where all probes for one key stay inside a single cache line.
class DynamicBloom {
 public:
  // allocator: pass allocator to bloom filter, hence trace the usage of memory
  // total_bits: fixed total bits for the bloom
  // num_probes: number of hash probes for a single key
  // locality:  If positive, optimize for cache line locality, 0 otherwise.
  // hash_func:  customized hash function
  // huge_page_tlb_size:  if >0, try to allocate bloom bytes from huge page TLB
  //                      within this page size. Need to reserve huge pages for
  //                      it to be allocated, like:
  //                         sysctl -w vm.nr_hugepages=20
  //                      See linux doc Documentation/vm/hugetlbpage.txt
  explicit DynamicBloom(Allocator* allocator,
                        uint32_t total_bits, uint32_t locality = 0,
                        uint32_t num_probes = 6,
                        uint32_t (*hash_func)(const Slice& key) = nullptr,
                        size_t huge_page_tlb_size = 0,
                        Logger* logger = nullptr);

  // Creates a filter with no storage; call SetTotalBits() or SetRawData()
  // before adding or querying keys (see IsInitialized()).
  explicit DynamicBloom(uint32_t num_probes = 6,
                        uint32_t (*hash_func)(const Slice& key) = nullptr);

  void SetTotalBits(Allocator* allocator, uint32_t total_bits,
                    uint32_t locality, size_t huge_page_tlb_size,
                    Logger* logger);

  // Trivial: bit storage is owned by the Allocator, not by this object.
  ~DynamicBloom() {}

  // Assuming single threaded access to this function.
  void Add(const Slice& key);

  // Like Add, but may be called concurrent with other functions.
  void AddConcurrently(const Slice& key);

  // Assuming single threaded access to this function.
  void AddHash(uint32_t hash);

  // Like AddHash, but may be called concurrent with other functions.
  void AddHashConcurrently(uint32_t hash);

  // Multithreaded access to this function is OK
  bool MayContain(const Slice& key) const;

  // Multithreaded access to this function is OK
  bool MayContainHash(uint32_t hash) const;

  void Prefetch(uint32_t h);

  uint32_t GetNumBlocks() const { return kNumBlocks; }

  // View over the raw bit array (GetTotalBits() / 8 bytes); does not copy.
  Slice GetRawData() const {
    return Slice(reinterpret_cast<char*>(data_), GetTotalBits() / 8);
  }

  void SetRawData(unsigned char* raw_data, uint32_t total_bits,
                  uint32_t num_blocks = 0);

  uint32_t GetTotalBits() const { return kTotalBits; }

  // True once storage has been configured by a constructor/SetTotalBits/
  // SetRawData (either layout implies a non-zero field below).
  bool IsInitialized() const { return kNumBlocks > 0 || kTotalBits > 0; }

 private:
  // NOTE(review): despite the k-prefix these two are mutable state, not
  // constants — they are reassigned by SetTotalBits/SetRawData.
  uint32_t kTotalBits;
  uint32_t kNumBlocks;
  const uint32_t kNumProbes;

  uint32_t (*hash_func_)(const Slice& key);
  std::atomic<uint8_t>* data_;

  // or_func(ptr, mask) should effect *ptr |= mask with the appropriate
  // concurrency safety, working with bytes.
  template <typename OrFunc>
  void AddHash(uint32_t hash, const OrFunc& or_func);
};
112
113
520k
inline void DynamicBloom::Add(const Slice& key) { AddHash(hash_func_(key)); }
114
115
inline void DynamicBloom::AddConcurrently(const Slice& key) {
116
  AddHashConcurrently(hash_func_(key));
117
}
118
119
1.19M
inline void DynamicBloom::AddHash(uint32_t hash) {
120
8.76M
  AddHash(hash, [](std::atomic<uint8_t>* ptr, uint8_t mask) {
121
8.76M
    ptr->store(ptr->load(std::memory_order_relaxed) | mask,
122
8.76M
               std::memory_order_relaxed);
123
8.76M
  });
124
1.19M
}
125
126
inline void DynamicBloom::AddHashConcurrently(uint32_t hash) {
127
  AddHash(hash, [](std::atomic<uint8_t>* ptr, uint8_t mask) {
128
    // Happens-before between AddHash and MaybeContains is handled by
129
    // access to versions_->LastSequence(), so all we have to do here is
130
    // avoid races (so we don't give the compiler a license to mess up
131
    // our code) and not lose bits.  std::memory_order_relaxed is enough
132
    // for that.
133
    if ((mask & ptr->load(std::memory_order_relaxed)) != mask) {
134
      ptr->fetch_or(mask, std::memory_order_relaxed);
135
    }
136
  });
137
}
138
139
9.49M
inline bool DynamicBloom::MayContain(const Slice& key) const {
140
9.49M
  return (MayContainHash(hash_func_(key)));
141
9.49M
}
142
143
0
inline void DynamicBloom::Prefetch(uint32_t h) {
144
0
  if (kNumBlocks != 0) {
145
0
    uint32_t b = ((h >> 11 | (h << 21)) % kNumBlocks) * (CACHE_LINE_SIZE * 8);
146
0
    PREFETCH(&(data_[b]), 0, 3);
147
0
  }
148
0
}
Unexecuted instantiation: rocksdb::DynamicBloom::Prefetch(unsigned int)
Unexecuted instantiation: rocksdb::DynamicBloom::Prefetch(unsigned int)
149
150
// Returns false iff no key hashing to |h| was ever added; a true result may
// be a false positive. Reads are relaxed atomic loads, so this is safe to
// call concurrently with AddHashConcurrently.
inline bool DynamicBloom::MayContainHash(uint32_t h) const {
  assert(IsInitialized());
  // Double hashing: every probe advances h by delta (h rotated right 17
  // bits), so the kNumProbes positions are derived from the single input hash.
  const uint32_t delta = (h >> 17) | (h << 15);  // Rotate right 17 bits
  if (kNumBlocks != 0) {
    // Cache-locality layout: choose one CACHE_LINE_SIZE-byte block and keep
    // every probe inside it, so a lookup touches a single cache line.
    uint32_t b = ((h >> 11 | (h << 21)) % kNumBlocks) * (CACHE_LINE_SIZE * 8);
    for (uint32_t i = 0; i < kNumProbes; ++i) {
      // Since CACHE_LINE_SIZE is defined as 2^n, this line will be optimized
      //  to a simple and operation by compiler.
      const uint32_t bitpos = b + (h % (CACHE_LINE_SIZE * 8));
      uint8_t byteval = data_[bitpos / 8].load(std::memory_order_relaxed);
      if ((byteval & (1 << (bitpos % 8))) == 0) {
        // A probe bit is clear: the key was definitely never added.
        return false;
      }
      // Rotate h so that we don't reuse the same bytes.
      h = h / (CACHE_LINE_SIZE * 8) +
          (h % (CACHE_LINE_SIZE * 8)) * (0x20000000U / CACHE_LINE_SIZE);
      h += delta;
    }
  } else {
    // Flat layout: probes range over the whole kTotalBits bit array.
    for (uint32_t i = 0; i < kNumProbes; ++i) {
      const uint32_t bitpos = h % kTotalBits;
      uint8_t byteval = data_[bitpos / 8].load(std::memory_order_relaxed);
      if ((byteval & (1 << (bitpos % 8))) == 0) {
        return false;
      }
      h += delta;
    }
  }
  // Every probe bit was set: the key may have been added (or this is a
  // false positive).
  return true;
}
180
181
// Shared implementation behind AddHash / AddHashConcurrently: computes the
// same kNumProbes bit positions as MayContainHash and sets each via
// or_func(ptr, mask), which must effect *ptr |= mask with whatever
// concurrency safety the caller needs.
template <typename OrFunc>
inline void DynamicBloom::AddHash(uint32_t h, const OrFunc& or_func) {
  assert(IsInitialized());
  // Double hashing: every probe advances h by delta (h rotated right 17
  // bits). Must mirror MayContainHash exactly or lookups will miss.
  const uint32_t delta = (h >> 17) | (h << 15);  // Rotate right 17 bits
  if (kNumBlocks != 0) {
    // Cache-locality layout: all probes stay inside one cache-line block.
    uint32_t b = ((h >> 11 | (h << 21)) % kNumBlocks) * (CACHE_LINE_SIZE * 8);
    for (uint32_t i = 0; i < kNumProbes; ++i) {
      // Since CACHE_LINE_SIZE is defined as 2^n, this line will be optimized
      // to a simple and operation by compiler.
      const uint32_t bitpos = b + (h % (CACHE_LINE_SIZE * 8));
      or_func(&data_[bitpos / 8], (1 << (bitpos % 8)));
      // Rotate h so that we don't reuse the same bytes.
      h = h / (CACHE_LINE_SIZE * 8) +
          (h % (CACHE_LINE_SIZE * 8)) * (0x20000000U / CACHE_LINE_SIZE);
      h += delta;
    }
  } else {
    // Flat layout: probes range over the whole kTotalBits bit array.
    for (uint32_t i = 0; i < kNumProbes; ++i) {
      const uint32_t bitpos = h % kTotalBits;
      or_func(&data_[bitpos / 8], (1 << (bitpos % 8)));
      h += delta;
    }
  }
}
205
206
} // namespace rocksdb
207
208
#endif // ROCKSDB_UTIL_DYNAMIC_BLOOM_H