YugabyteDB (2.13.0.0-b42, bfc6a6643e7399ac8a0e81d06a3ee6d6571b33ab)

Coverage Report

Created: 2022-03-09 17:30

/Users/deen/code/yugabyte-db/src/yb/util/inline_slice.h
Line | Count | Source
    1|       |// Licensed to the Apache Software Foundation (ASF) under one
    2|       |// or more contributor license agreements.  See the NOTICE file
    3|       |// distributed with this work for additional information
    4|       |// regarding copyright ownership.  The ASF licenses this file
    5|       |// to you under the Apache License, Version 2.0 (the
    6|       |// "License"); you may not use this file except in compliance
    7|       |// with the License.  You may obtain a copy of the License at
    8|       |//
    9|       |//   http://www.apache.org/licenses/LICENSE-2.0
   10|       |//
   11|       |// Unless required by applicable law or agreed to in writing,
   12|       |// software distributed under the License is distributed on an
   13|       |// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
   14|       |// KIND, either express or implied.  See the License for the
   15|       |// specific language governing permissions and limitations
   16|       |// under the License.
   17|       |//
   18|       |// The following only applies to changes made to this file as part of YugaByte development.
   19|       |//
   20|       |// Portions Copyright (c) YugaByte, Inc.
   21|       |//
   22|       |// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
   23|       |// in compliance with the License.  You may obtain a copy of the License at
   24|       |//
   25|       |// http://www.apache.org/licenses/LICENSE-2.0
   26|       |//
   27|       |// Unless required by applicable law or agreed to in writing, software distributed under the License
   28|       |// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
   29|       |// or implied.  See the License for the specific language governing permissions and limitations
   30|       |// under the License.
   31|       |//
   32|       |#ifndef YB_UTIL_INLINE_SLICE_H
   33|       |#define YB_UTIL_INLINE_SLICE_H
   34|       |
   35|       |#include <atomic>
   36|       |#include <new>
   37|       |
   38|       |#include <boost/static_assert.hpp>
   39|       |#include <glog/logging.h>
   40|       |
   41|       |#include "yb/gutil/atomicops.h"
   42|       |#include "yb/gutil/casts.h"
   43|       |
   44|       |#include "yb/util/slice.h"
   45|       |
   46|       |namespace yb {
   47|       |
   48|       |#if __BYTE_ORDER != __LITTLE_ENDIAN
   49|       |#error This needs to be ported for big endian
   50|       |#endif
   51|       |
   52|       |// Class which represents short strings inline, and stores longer ones
   53|       |// by instead storing a pointer.
   54|       |//
   55|       |// Internal format:
   56|       |// The buffer must be at least as large as a pointer (eg 8 bytes for 64-bit).
   57|       |// Let ptr = bit-casting the first 8 bytes as a pointer:
   58|       |// If buf_[0] < 0xff:
   59|       |//   buf_[0] == length of stored data
   60|       |//   buf_[1..1 + buf_[0]] == inline data
   61|       |// If buf_[0] == 0xff:
   62|       |//   buf_[1..sizeof(uint8_t *)] == pointer to indirect data, minus the MSB.
   63|       |//   buf_[sizeof(uint8_t *)..] = unused
   64|       |// TODO: we could store a prefix of the indirect data in this unused space
   65|       |// in the future, which might be able to short-circuit some comparisons
   66|       |//
   67|       |// The indirect data which is pointed to is stored as a 4 byte length followed by
   68|       |// the actual data.
   69|       |//
   70|       |// This class relies on the fact that the most significant bit of any x86 pointer is
   71|       |// 0 (i.e pointers only use the bottom 48 bits)
   72|       |//
   73|       |// If ATOMIC is true, then this class has the semantics that readers will never see
   74|       |// invalid pointers, even in the case of concurrent access. However, they _may_ see
   75|       |// invalid *data*. That is to say, calling 'as_slice()' will always return a slice
   76|       |// which points to a valid memory region -- the memory region may contain garbage
   77|       |// but will not cause a segfault on access.
   78|       |//
   79|       |// These ATOMIC semantics may seem too loose to be useful, but can be used in
   80|       |// optimistic concurrency control schemes -- so long as accessing the slice doesn't
   81|       |// produce a segfault, it's OK to read bad data on a race because the higher-level
   82|       |// concurrency control will cause a retry.
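To make the encoding described above concrete, here is a minimal stand-alone sketch of the same discriminator scheme. It is illustrative only -- the EncodingModel type and its members are hypothetical and not part of inline_slice.h -- but it mirrors the layout the comment describes: byte 0 tags the word (0..0xfe = inline length, 0xff = indirect), and an indirect pointer lives in the upper bits of the same word.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    struct EncodingModel {
      uint8_t buf[8];

      bool is_indirect() const { return buf[0] == 0xff; }

      // Inline case: byte 0 is the length (at most 7 here, so it can
      // never collide with the 0xff pointer marker).
      void set_inline(const uint8_t* src, uint8_t len) {
        buf[0] = len;
        memcpy(&buf[1], src, len);
      }

      // Indirect case: keep the pointer in bits 8.. of the word and the
      // 0xff tag in byte 0, which requires the pointer's top byte to be
      // zero (the same property set_ptr() DCHECKs below).
      void set_indirect(const void* ptr) {
        uintptr_t p = reinterpret_cast<uintptr_t>(ptr);
        assert((p >> 56) == 0);
        uintptr_t word = (p << 8) | 0xff;
        memcpy(buf, &word, sizeof(word));
      }

      const void* pointer() const {
        uintptr_t word;
        memcpy(&word, buf, sizeof(word));
        return reinterpret_cast<const void*>(word >> 8);
      }
    };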
   83|       |template<size_t STORAGE_SIZE, bool ATOMIC = false>
   84|       |class InlineSlice {
   85|       | private:
   86|       |  enum {
   87|       |    kPointerByteWidth = sizeof(uintptr_t),
   88|       |    kPointerBitWidth = kPointerByteWidth * 8,
   89|       |    kMaxInlineData = STORAGE_SIZE - 1
   90|       |  };
   91|       |
   92|       |  BOOST_STATIC_ASSERT(STORAGE_SIZE >= kPointerByteWidth);
   93|       |  BOOST_STATIC_ASSERT(STORAGE_SIZE <= 256);
   94|       | public:
   95|      3|  InlineSlice() {
   96|      3|  }
_ZN2yb11InlineSliceILm8ELb0EEC2Ev:
   95|      1|  InlineSlice() {
   96|      1|  }
_ZN2yb11InlineSliceILm12ELb0EEC2Ev:
   95|      1|  InlineSlice() {
   96|      1|  }
_ZN2yb11InlineSliceILm16ELb0EEC2Ev:
   95|      1|  InlineSlice() {
   96|      1|  }
   97|       |
   98|  6.00k|  inline const Slice as_slice() const ATTRIBUTE_ALWAYS_INLINE {
   99|  6.00k|    DiscriminatedPointer dptr = LoadValue();
  100|       |
  101|  6.00k|    if (dptr.is_indirect()) {
  102|  5.93k|      const uint8_t *indir_data = reinterpret_cast<const uint8_t *>(dptr.pointer);
  103|  5.93k|      uint32_t len = *reinterpret_cast<const uint32_t *>(indir_data);
  104|  5.93k|      indir_data += sizeof(uint32_t);
  105|  5.93k|      return Slice(indir_data, (size_t)len);
  106|     69|    } else {
  107|     69|      uint8_t len = dptr.discriminator;
  108|     69|      DCHECK_LE(len, STORAGE_SIZE - 1);
  109|     69|      return Slice(&buf_[1], len);
  110|     69|    }
  111|  6.00k|  }
_ZNK2yb11InlineSliceILm8ELb0EE8as_sliceEv:
   98|  2.00k|  inline const Slice as_slice() const ATTRIBUTE_ALWAYS_INLINE {
   99|  2.00k|    DiscriminatedPointer dptr = LoadValue();
  100|       |
  101|  2.00k|    if (dptr.is_indirect()) {
  102|  1.98k|      const uint8_t *indir_data = reinterpret_cast<const uint8_t *>(dptr.pointer);
  103|  1.98k|      uint32_t len = *reinterpret_cast<const uint32_t *>(indir_data);
  104|  1.98k|      indir_data += sizeof(uint32_t);
  105|  1.98k|      return Slice(indir_data, (size_t)len);
  106|     15|    } else {
  107|     15|      uint8_t len = dptr.discriminator;
  108|     15|      DCHECK_LE(len, STORAGE_SIZE - 1);
  109|     15|      return Slice(&buf_[1], len);
  110|     15|    }
  111|  2.00k|  }
_ZNK2yb11InlineSliceILm12ELb0EE8as_sliceEv:
   98|  2.00k|  inline const Slice as_slice() const ATTRIBUTE_ALWAYS_INLINE {
   99|  2.00k|    DiscriminatedPointer dptr = LoadValue();
  100|       |
  101|  2.00k|    if (dptr.is_indirect()) {
  102|  1.97k|      const uint8_t *indir_data = reinterpret_cast<const uint8_t *>(dptr.pointer);
  103|  1.97k|      uint32_t len = *reinterpret_cast<const uint32_t *>(indir_data);
  104|  1.97k|      indir_data += sizeof(uint32_t);
  105|  1.97k|      return Slice(indir_data, (size_t)len);
  106|     23|    } else {
  107|     23|      uint8_t len = dptr.discriminator;
  108|     23|      DCHECK_LE(len, STORAGE_SIZE - 1);
  109|     23|      return Slice(&buf_[1], len);
  110|     23|    }
  111|  2.00k|  }
_ZNK2yb11InlineSliceILm16ELb0EE8as_sliceEv:
   98|  2.00k|  inline const Slice as_slice() const ATTRIBUTE_ALWAYS_INLINE {
   99|  2.00k|    DiscriminatedPointer dptr = LoadValue();
  100|       |
  101|  2.00k|    if (dptr.is_indirect()) {
  102|  1.96k|      const uint8_t *indir_data = reinterpret_cast<const uint8_t *>(dptr.pointer);
  103|  1.96k|      uint32_t len = *reinterpret_cast<const uint32_t *>(indir_data);
  104|  1.96k|      indir_data += sizeof(uint32_t);
  105|  1.96k|      return Slice(indir_data, (size_t)len);
  106|     31|    } else {
  107|     31|      uint8_t len = dptr.discriminator;
  108|     31|      DCHECK_LE(len, STORAGE_SIZE - 1);
  109|     31|      return Slice(&buf_[1], len);
  110|     31|    }
  111|  2.00k|  }
  112|       |
  113|       |  template<class ArenaType>
  114|  6.00k|  void set(const Slice &src, ArenaType *alloc_arena) {
  115|  6.00k|    set(src.data(), src.size(), alloc_arena);
  116|  6.00k|  }
_ZN2yb11InlineSliceILm8ELb0EE3setINS_8internal9ArenaBaseINS3_11ArenaTraitsEEEEEvRKNS_5SliceEPT_:
  114|  2.00k|  void set(const Slice &src, ArenaType *alloc_arena) {
  115|  2.00k|    set(src.data(), src.size(), alloc_arena);
  116|  2.00k|  }
_ZN2yb11InlineSliceILm12ELb0EE3setINS_8internal9ArenaBaseINS3_11ArenaTraitsEEEEEvRKNS_5SliceEPT_:
  114|  2.00k|  void set(const Slice &src, ArenaType *alloc_arena) {
  115|  2.00k|    set(src.data(), src.size(), alloc_arena);
  116|  2.00k|  }
_ZN2yb11InlineSliceILm16ELb0EE3setINS_8internal9ArenaBaseINS3_11ArenaTraitsEEEEEvRKNS_5SliceEPT_:
  114|  2.00k|  void set(const Slice &src, ArenaType *alloc_arena) {
  115|  2.00k|    set(src.data(), src.size(), alloc_arena);
  116|  2.00k|  }
  117|       |
  118|       |  template<class ArenaType>
  119|  6.00k|  void set(const uint8_t *src, size_t len, ArenaType *alloc_arena) {
  120|  6.00k|    if (len <= kMaxInlineData) {
  121|     69|      if (ATOMIC) {
  122|       |        // If atomic, we need to make sure that we store the discriminator
  123|       |        // before we copy in any data. Otherwise the data would overwrite
  124|       |        // part of a pointer and a reader might see an invalid address.
  125|      0|        DiscriminatedPointer dptr;
  126|      0|        dptr.discriminator = len;
  127|      0|        dptr.pointer = 0; // will be overwritten
  128|       |        // "Acquire" ensures that the later memcpy doesn't reorder above the
  129|       |        // set of the discriminator bit.
  130|      0|        base::subtle::Acquire_Store(reinterpret_cast<volatile AtomicWord *>(buf_),
  131|      0|                                    bit_cast<uintptr_t>(dptr));
  132|     69|      } else {
  133|     69|        buf_[0] = len;
  134|     69|      }
  135|     69|      memcpy(&buf_[1], src, len);
  136|       |
  137|  5.93k|    } else {
  138|       |      // TODO: if already indirect and the current storage has enough space, just reuse that.
  139|       |
  140|       |      // Set up the pointed-to data before setting a pointer to it. This ensures that readers
  141|       |      // never see a pointer to an invalid region (i.e one without a proper length header).
  142|  5.93k|      void *in_arena = CHECK_NOTNULL(alloc_arena->AllocateBytesAligned(len + sizeof(uint32_t),
  143|  5.93k|                                                                       alignof(uint32_t)));
  144|  5.93k|      *reinterpret_cast<uint32_t *>(in_arena) = narrow_cast<uint32_t>(len);
  145|  5.93k|      memcpy(reinterpret_cast<uint8_t *>(in_arena) + sizeof(uint32_t), src, len);
  146|  5.93k|      set_ptr(in_arena);
  147|  5.93k|    }
  148|  6.00k|  }
_ZN2yb11InlineSliceILm8ELb0EE3setINS_8internal9ArenaBaseINS3_11ArenaTraitsEEEEEvPKhmPT_:
  119|  2.00k|  void set(const uint8_t *src, size_t len, ArenaType *alloc_arena) {
  120|  2.00k|    if (len <= kMaxInlineData) {
  121|     15|      if (ATOMIC) {
  122|       |        // If atomic, we need to make sure that we store the discriminator
  123|       |        // before we copy in any data. Otherwise the data would overwrite
  124|       |        // part of a pointer and a reader might see an invalid address.
  125|      0|        DiscriminatedPointer dptr;
  126|      0|        dptr.discriminator = len;
  127|      0|        dptr.pointer = 0; // will be overwritten
  128|       |        // "Acquire" ensures that the later memcpy doesn't reorder above the
  129|       |        // set of the discriminator bit.
  130|      0|        base::subtle::Acquire_Store(reinterpret_cast<volatile AtomicWord *>(buf_),
  131|      0|                                    bit_cast<uintptr_t>(dptr));
  132|     15|      } else {
  133|     15|        buf_[0] = len;
  134|     15|      }
  135|     15|      memcpy(&buf_[1], src, len);
  136|       |
  137|  1.98k|    } else {
  138|       |      // TODO: if already indirect and the current storage has enough space, just reuse that.
  139|       |
  140|       |      // Set up the pointed-to data before setting a pointer to it. This ensures that readers
  141|       |      // never see a pointer to an invalid region (i.e one without a proper length header).
  142|  1.98k|      void *in_arena = CHECK_NOTNULL(alloc_arena->AllocateBytesAligned(len + sizeof(uint32_t),
  143|  1.98k|                                                                       alignof(uint32_t)));
  144|  1.98k|      *reinterpret_cast<uint32_t *>(in_arena) = narrow_cast<uint32_t>(len);
  145|  1.98k|      memcpy(reinterpret_cast<uint8_t *>(in_arena) + sizeof(uint32_t), src, len);
  146|  1.98k|      set_ptr(in_arena);
  147|  1.98k|    }
  148|  2.00k|  }
_ZN2yb11InlineSliceILm12ELb0EE3setINS_8internal9ArenaBaseINS3_11ArenaTraitsEEEEEvPKhmPT_:
  119|  2.00k|  void set(const uint8_t *src, size_t len, ArenaType *alloc_arena) {
  120|  2.00k|    if (len <= kMaxInlineData) {
  121|     23|      if (ATOMIC) {
  122|       |        // If atomic, we need to make sure that we store the discriminator
  123|       |        // before we copy in any data. Otherwise the data would overwrite
  124|       |        // part of a pointer and a reader might see an invalid address.
  125|      0|        DiscriminatedPointer dptr;
  126|      0|        dptr.discriminator = len;
  127|      0|        dptr.pointer = 0; // will be overwritten
  128|       |        // "Acquire" ensures that the later memcpy doesn't reorder above the
  129|       |        // set of the discriminator bit.
  130|      0|        base::subtle::Acquire_Store(reinterpret_cast<volatile AtomicWord *>(buf_),
  131|      0|                                    bit_cast<uintptr_t>(dptr));
  132|     23|      } else {
  133|     23|        buf_[0] = len;
  134|     23|      }
  135|     23|      memcpy(&buf_[1], src, len);
  136|       |
  137|  1.97k|    } else {
  138|       |      // TODO: if already indirect and the current storage has enough space, just reuse that.
  139|       |
  140|       |      // Set up the pointed-to data before setting a pointer to it. This ensures that readers
  141|       |      // never see a pointer to an invalid region (i.e one without a proper length header).
  142|  1.97k|      void *in_arena = CHECK_NOTNULL(alloc_arena->AllocateBytesAligned(len + sizeof(uint32_t),
  143|  1.97k|                                                                       alignof(uint32_t)));
  144|  1.97k|      *reinterpret_cast<uint32_t *>(in_arena) = narrow_cast<uint32_t>(len);
  145|  1.97k|      memcpy(reinterpret_cast<uint8_t *>(in_arena) + sizeof(uint32_t), src, len);
  146|  1.97k|      set_ptr(in_arena);
  147|  1.97k|    }
  148|  2.00k|  }
_ZN2yb11InlineSliceILm16ELb0EE3setINS_8internal9ArenaBaseINS3_11ArenaTraitsEEEEEvPKhmPT_:
  119|  2.00k|  void set(const uint8_t *src, size_t len, ArenaType *alloc_arena) {
  120|  2.00k|    if (len <= kMaxInlineData) {
  121|     31|      if (ATOMIC) {
  122|       |        // If atomic, we need to make sure that we store the discriminator
  123|       |        // before we copy in any data. Otherwise the data would overwrite
  124|       |        // part of a pointer and a reader might see an invalid address.
  125|      0|        DiscriminatedPointer dptr;
  126|      0|        dptr.discriminator = len;
  127|      0|        dptr.pointer = 0; // will be overwritten
  128|       |        // "Acquire" ensures that the later memcpy doesn't reorder above the
  129|       |        // set of the discriminator bit.
  130|      0|        base::subtle::Acquire_Store(reinterpret_cast<volatile AtomicWord *>(buf_),
  131|      0|                                    bit_cast<uintptr_t>(dptr));
  132|     31|      } else {
  133|     31|        buf_[0] = len;
  134|     31|      }
  135|     31|      memcpy(&buf_[1], src, len);
  136|       |
  137|  1.96k|    } else {
  138|       |      // TODO: if already indirect and the current storage has enough space, just reuse that.
  139|       |
  140|       |      // Set up the pointed-to data before setting a pointer to it. This ensures that readers
  141|       |      // never see a pointer to an invalid region (i.e one without a proper length header).
  142|  1.96k|      void *in_arena = CHECK_NOTNULL(alloc_arena->AllocateBytesAligned(len + sizeof(uint32_t),
  143|  1.96k|                                                                       alignof(uint32_t)));
  144|  1.96k|      *reinterpret_cast<uint32_t *>(in_arena) = narrow_cast<uint32_t>(len);
  145|  1.96k|      memcpy(reinterpret_cast<uint8_t *>(in_arena) + sizeof(uint32_t), src, len);
  146|  1.96k|      set_ptr(in_arena);
  147|  1.96k|    }
  148|  2.00k|  }
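The ATOMIC branches above and in set_ptr()/LoadValue() below follow a publish-then-read discipline: the writer fully initializes the length-prefixed payload before storing the pointer word, so a reader that observes the new pointer also observes a valid length header. Here is a minimal sketch of that pattern for the indirect path, using std::atomic instead of the gutil atomicops this file uses (g_word, publish, and read_payload are hypothetical names, not part of inline_slice.h):

    #include <atomic>
    #include <cstdint>
    #include <cstring>

    // A single word that, once published, points at a 4-byte length header
    // followed by the data -- the indirect format described in this file.
    static std::atomic<uintptr_t> g_word{0};

    void publish(uint8_t* payload, const uint8_t* src, uint32_t len) {
      std::memcpy(payload, &len, sizeof(len));       // length header first
      std::memcpy(payload + sizeof(len), src, len);  // then the data
      g_word.store(reinterpret_cast<uintptr_t>(payload),
                   std::memory_order_release);       // publish last
    }

    // Assumes publish() has run at least once.
    const uint8_t* read_payload(uint32_t* len_out) {
      auto p = reinterpret_cast<const uint8_t*>(
          g_word.load(std::memory_order_acquire));   // pairs with the release
      std::memcpy(len_out, p, sizeof(*len_out));
      return p + sizeof(*len_out);
    }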
  149|       |
  150|       | private:
  151|       |  struct DiscriminatedPointer {
  152|       |    uint8_t discriminator : 8;
  153|       |    uintptr_t pointer : 54;
  154|       |
  155|  6.00k|    bool is_indirect() const {
  156|  6.00k|      return discriminator == 0xff;
  157|  6.00k|    }
_ZNK2yb11InlineSliceILm8ELb0EE20DiscriminatedPointer11is_indirectEv:
  155|  2.00k|    bool is_indirect() const {
  156|  2.00k|      return discriminator == 0xff;
  157|  2.00k|    }
_ZNK2yb11InlineSliceILm12ELb0EE20DiscriminatedPointer11is_indirectEv:
  155|  2.00k|    bool is_indirect() const {
  156|  2.00k|      return discriminator == 0xff;
  157|  2.00k|    }
_ZNK2yb11InlineSliceILm16ELb0EE20DiscriminatedPointer11is_indirectEv:
  155|  2.00k|    bool is_indirect() const {
  156|  2.00k|      return discriminator == 0xff;
  157|  2.00k|    }
  158|       |  };
  159|       |
  160|  6.00k|  DiscriminatedPointer LoadValue() const {
  161|  6.00k|    if (ATOMIC) {
  162|       |      // Load with "Acquire" semantics -- if we load a pointer, this ensures
  163|       |      // that we also see the pointed-to data.
  164|      0|      uintptr_t ptr_val = base::subtle::Acquire_Load(
  165|      0|        reinterpret_cast<volatile const AtomicWord *>(buf_));
  166|      0|      return bit_cast<DiscriminatedPointer>(ptr_val);
  167|  6.00k|    } else {
  168|  6.00k|      DiscriminatedPointer ret;
  169|  6.00k|      memcpy(&ret, buf_, sizeof(ret));
  170|  6.00k|      return ret;
  171|  6.00k|    }
  172|  6.00k|  }
_ZNK2yb11InlineSliceILm8ELb0EE9LoadValueEv:
  160|  2.00k|  DiscriminatedPointer LoadValue() const {
  161|  2.00k|    if (ATOMIC) {
  162|       |      // Load with "Acquire" semantics -- if we load a pointer, this ensures
  163|       |      // that we also see the pointed-to data.
  164|      0|      uintptr_t ptr_val = base::subtle::Acquire_Load(
  165|      0|        reinterpret_cast<volatile const AtomicWord *>(buf_));
  166|      0|      return bit_cast<DiscriminatedPointer>(ptr_val);
  167|  2.00k|    } else {
  168|  2.00k|      DiscriminatedPointer ret;
  169|  2.00k|      memcpy(&ret, buf_, sizeof(ret));
  170|  2.00k|      return ret;
  171|  2.00k|    }
  172|  2.00k|  }
_ZNK2yb11InlineSliceILm12ELb0EE9LoadValueEv:
  160|  2.00k|  DiscriminatedPointer LoadValue() const {
  161|  2.00k|    if (ATOMIC) {
  162|       |      // Load with "Acquire" semantics -- if we load a pointer, this ensures
  163|       |      // that we also see the pointed-to data.
  164|      0|      uintptr_t ptr_val = base::subtle::Acquire_Load(
  165|      0|        reinterpret_cast<volatile const AtomicWord *>(buf_));
  166|      0|      return bit_cast<DiscriminatedPointer>(ptr_val);
  167|  2.00k|    } else {
  168|  2.00k|      DiscriminatedPointer ret;
  169|  2.00k|      memcpy(&ret, buf_, sizeof(ret));
  170|  2.00k|      return ret;
  171|  2.00k|    }
  172|  2.00k|  }
_ZNK2yb11InlineSliceILm16ELb0EE9LoadValueEv:
  160|  2.00k|  DiscriminatedPointer LoadValue() const {
  161|  2.00k|    if (ATOMIC) {
  162|       |      // Load with "Acquire" semantics -- if we load a pointer, this ensures
  163|       |      // that we also see the pointed-to data.
  164|      0|      uintptr_t ptr_val = base::subtle::Acquire_Load(
  165|      0|        reinterpret_cast<volatile const AtomicWord *>(buf_));
  166|      0|      return bit_cast<DiscriminatedPointer>(ptr_val);
  167|  2.00k|    } else {
  168|  2.00k|      DiscriminatedPointer ret;
  169|  2.00k|      memcpy(&ret, buf_, sizeof(ret));
  170|  2.00k|      return ret;
  171|  2.00k|    }
  172|  2.00k|  }
  173|       |
  174|       |  // Set the internal storage to be an indirect pointer to the given
  175|       |  // address.
  176|  5.93k|  void set_ptr(void *ptr) {
  177|  5.93k|    uintptr_t ptr_int = reinterpret_cast<uintptr_t>(ptr);
  178|      0|    DCHECK_EQ(ptr_int >> (kPointerBitWidth - 8), 0) <<
  179|      0|      "bad pointer (should have 0x00 MSB): " << ptr;
  180|       |
  181|  5.93k|    DiscriminatedPointer dptr;
  182|  5.93k|    dptr.discriminator = 0xff;
  183|  5.93k|    dptr.pointer = ptr_int;
  184|       |
  185|  5.93k|    if (ATOMIC) {
  186|       |      // Store with "Release" semantics -- this ensures that the pointed-to data
  187|       |      // is visible to any readers who see this pointer.
  188|      0|      uintptr_t to_store = bit_cast<uintptr_t>(dptr);
  189|      0|      base::subtle::Release_Store(reinterpret_cast<volatile AtomicWord *>(buf_),
  190|      0|                                  to_store);
  191|  5.93k|    } else {
  192|  5.93k|      memcpy(&buf_[0], &dptr, sizeof(dptr));
  193|  5.93k|    }
  194|  5.93k|  }
_ZN2yb11InlineSliceILm8ELb0EE7set_ptrEPv:
  176|  1.98k|  void set_ptr(void *ptr) {
  177|  1.98k|    uintptr_t ptr_int = reinterpret_cast<uintptr_t>(ptr);
  178|      0|    DCHECK_EQ(ptr_int >> (kPointerBitWidth - 8), 0) <<
  179|      0|      "bad pointer (should have 0x00 MSB): " << ptr;
  180|       |
  181|  1.98k|    DiscriminatedPointer dptr;
  182|  1.98k|    dptr.discriminator = 0xff;
  183|  1.98k|    dptr.pointer = ptr_int;
  184|       |
  185|  1.98k|    if (ATOMIC) {
  186|       |      // Store with "Release" semantics -- this ensures that the pointed-to data
  187|       |      // is visible to any readers who see this pointer.
  188|      0|      uintptr_t to_store = bit_cast<uintptr_t>(dptr);
  189|      0|      base::subtle::Release_Store(reinterpret_cast<volatile AtomicWord *>(buf_),
  190|      0|                                  to_store);
  191|  1.98k|    } else {
  192|  1.98k|      memcpy(&buf_[0], &dptr, sizeof(dptr));
  193|  1.98k|    }
  194|  1.98k|  }
_ZN2yb11InlineSliceILm12ELb0EE7set_ptrEPv:
  176|  1.97k|  void set_ptr(void *ptr) {
  177|  1.97k|    uintptr_t ptr_int = reinterpret_cast<uintptr_t>(ptr);
  178|      0|    DCHECK_EQ(ptr_int >> (kPointerBitWidth - 8), 0) <<
  179|      0|      "bad pointer (should have 0x00 MSB): " << ptr;
  180|       |
  181|  1.97k|    DiscriminatedPointer dptr;
  182|  1.97k|    dptr.discriminator = 0xff;
  183|  1.97k|    dptr.pointer = ptr_int;
  184|       |
  185|  1.97k|    if (ATOMIC) {
  186|       |      // Store with "Release" semantics -- this ensures that the pointed-to data
  187|       |      // is visible to any readers who see this pointer.
  188|      0|      uintptr_t to_store = bit_cast<uintptr_t>(dptr);
  189|      0|      base::subtle::Release_Store(reinterpret_cast<volatile AtomicWord *>(buf_),
  190|      0|                                  to_store);
  191|  1.97k|    } else {
  192|  1.97k|      memcpy(&buf_[0], &dptr, sizeof(dptr));
  193|  1.97k|    }
  194|  1.97k|  }
_ZN2yb11InlineSliceILm16ELb0EE7set_ptrEPv:
  176|  1.96k|  void set_ptr(void *ptr) {
  177|  1.96k|    uintptr_t ptr_int = reinterpret_cast<uintptr_t>(ptr);
  178|      0|    DCHECK_EQ(ptr_int >> (kPointerBitWidth - 8), 0) <<
  179|      0|      "bad pointer (should have 0x00 MSB): " << ptr;
  180|       |
  181|  1.96k|    DiscriminatedPointer dptr;
  182|  1.96k|    dptr.discriminator = 0xff;
  183|  1.96k|    dptr.pointer = ptr_int;
  184|       |
  185|  1.96k|    if (ATOMIC) {
  186|       |      // Store with "Release" semantics -- this ensures that the pointed-to data
  187|       |      // is visible to any readers who see this pointer.
  188|      0|      uintptr_t to_store = bit_cast<uintptr_t>(dptr);
  189|      0|      base::subtle::Release_Store(reinterpret_cast<volatile AtomicWord *>(buf_),
  190|      0|                                  to_store);
  191|  1.96k|    } else {
  192|  1.96k|      memcpy(&buf_[0], &dptr, sizeof(dptr));
  193|  1.96k|    }
  194|  1.96k|  }
  195|       |
  196|       |  uint8_t buf_[STORAGE_SIZE];
  197|       |
  198|       |} PACKED;
  199|       |
  200|       |} // namespace yb
  201|       |
  202|       |#endif // YB_UTIL_INLINE_SLICE_H
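For reference, here is a hypothetical caller of this class. The toy arena below provides only the AllocateBytesAligned() call that set() relies on above (the mangled names in this report show the real instantiations use yb::internal::ArenaBase); it is a sketch, not yb's arena:

    #include <cstdint>
    #include <cstdlib>

    // Toy arena exposing just the allocation hook InlineSlice::set() needs.
    // It never frees: real arena memory is reclaimed in bulk.
    struct ToyArena {
      void* AllocateBytesAligned(std::size_t len, std::size_t align) {
        // std::aligned_alloc requires len to be a multiple of align.
        return std::aligned_alloc(align, (len + align - 1) / align * align);
      }
    };

    // Hypothetical usage, assuming inline_slice.h and its dependencies:
    //
    //   yb::InlineSlice<16> s;                       // 15 inline bytes max
    //   ToyArena arena;
    //   s.set(Slice("hi"), &arena);                  // short: stored inline
    //   s.set(Slice(std::string(64, 'x')), &arena);  // long: arena-backed
    //   Slice view = s.as_slice();                   // view of current value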