/Users/deen/code/yugabyte-db/src/yb/rocksdb/util/thread_local.h
Line | Count | Source |
1 | | // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. |
2 | | // This source code is licensed under the BSD-style license found in the |
3 | | // LICENSE file in the root directory of this source tree. An additional grant |
4 | | // of patent rights can be found in the PATENTS file in the same directory. |
5 | | // |
6 | | // The following only applies to changes made to this file as part of YugaByte development. |
7 | | // |
8 | | // Portions Copyright (c) YugaByte, Inc. |
9 | | // |
10 | | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except |
11 | | // in compliance with the License. You may obtain a copy of the License at |
12 | | // |
13 | | // http://www.apache.org/licenses/LICENSE-2.0 |
14 | | // |
15 | | // Unless required by applicable law or agreed to in writing, software distributed under the License |
16 | | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express |
17 | | // or implied. See the License for the specific language governing permissions and limitations |
18 | | // under the License. |
19 | | // |
20 | | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. |
21 | | // Use of this source code is governed by a BSD-style license that can be |
22 | | // found in the LICENSE file. See the AUTHORS file for names of contributors. |
23 | | |
24 | | #pragma once |
25 | | |
26 | | #include <atomic> |
27 | | #include <unordered_map> |
28 | | #include <vector> |
29 | | |
30 | | #include "yb/rocksdb/port/port.h" |
31 | | #include "yb/rocksdb/util/autovector.h" |
32 | | |
33 | | #ifndef ROCKSDB_SUPPORT_THREAD_LOCAL // NOLINT |
34 | | #if !defined(OS_WIN) && !defined(OS_MACOSX) && !defined(IOS_CROSS_COMPILE) |
35 | | #define ROCKSDB_SUPPORT_THREAD_LOCAL 1 |
36 | | #else |
37 | | #define ROCKSDB_SUPPORT_THREAD_LOCAL 0 |
38 | | #endif |
39 | | #endif // NOLINT |
40 | | |
41 | | namespace rocksdb { |
42 | | |
43 | | // Cleanup function that will be called for a stored thread local |
44 | | // pointer (if not NULL) when one of the following happens: |
45 | | // (1) a thread terminates |
46 | | // (2) a ThreadLocalPtr is destroyed |
47 | | typedef void (*UnrefHandler)(void* ptr); |
48 | | |
49 | | // ThreadLocalPtr stores only values of pointer type. Different from |
50 | | // the usual thread-local-storage, ThreadLocalPtr has the ability to |
51 | | // distinguish data coming from different threads and different |
52 | | // ThreadLocalPtr instances. For example, if a regular thread_local |
53 | | // variable A is declared in DBImpl, two DBImpl objects would share |
54 | | // the same A. However, a ThreadLocalPtr that is defined under the |
55 | | // scope of DBImpl can avoid such a conflict. As a result, its memory |
56 | | // usage would be O(# of threads * # of ThreadLocalPtr instances). |
57 | | class ThreadLocalPtr { |
58 | | public: |
59 | | explicit ThreadLocalPtr(UnrefHandler handler = nullptr); |
60 | | |
61 | | ~ThreadLocalPtr(); |
62 | | |
63 | | // Return the current pointer stored in thread local |
64 | | void* Get() const; |
65 | | |
66 | | // Set a new pointer value to the thread local storage. |
67 | | void Reset(void* ptr); |
68 | | |
69 | | // Atomically swap the supplied ptr and return the previous value |
70 | | void* Swap(void* ptr); |
71 | | |
72 | | // Atomically compare the stored value with expected. Set the new |
73 | | // pointer value to thread local only if the comparison is true. |
74 | | // Otherwise, the stored value is written back to expected. |
75 | | // Return true on success, false on failure |
76 | | bool CompareAndSwap(void* ptr, void*& expected); |
77 | | |
78 | | // Reset all thread local data to replacement, and return non-nullptr |
79 | | // data for all existing threads |
80 | | void Scrape(autovector<void*>* ptrs, void* const replacement); |
81 | | |
82 | | // Initialize the static singletons of the ThreadLocalPtr. |
83 | | // |
84 | | // If this function is not called, then the singletons will be |
85 | | // automatically initialized when they are used. |
86 | | // |
87 | | // Calling this function twice or after the singletons have been |
88 | | // initialized will be a no-op. |
89 | | static void InitSingletons(); |
90 | | |
91 | | protected: |
92 | | struct Entry { |
93 | 7.71M | Entry() : ptr(nullptr) {} |
94 | 5.79M | Entry(const Entry& e) : ptr(e.ptr.load(std::memory_order_relaxed)) {} |
95 | | std::atomic<void*> ptr; |
96 | | }; |
97 | | |
98 | | class StaticMeta; |
99 | | |
100 | | // This is the structure that is declared as "thread_local" storage. |
101 | | // The vector keeps a list of atomic pointers for all instances for "current" |
102 | | // thread. The vector is indexed by an Id that is unique in process and |
103 | | // associated with one ThreadLocalPtr instance. The Id is assigned by a |
104 | | // global StaticMeta singleton. So if we instantiated 3 ThreadLocalPtr |
105 | | // instances, each thread will have a ThreadData with a vector of size 3: |
106 | | // --------------------------------------------------- |
107 | | // | | instance 1 | instance 2 | instance 3 | |
108 | | // --------------------------------------------------- |
109 | | // | thread 1 | void* | void* | void* | <- ThreadData |
110 | | // --------------------------------------------------- |
111 | | // | thread 2 | void* | void* | void* | <- ThreadData |
112 | | // --------------------------------------------------- |
113 | | // | thread 3 | void* | void* | void* | <- ThreadData |
114 | | // --------------------------------------------------- |
115 | | struct ThreadData { |
116 | 128k | explicit ThreadData(StaticMeta* _inst) : entries(), inst(_inst) {} |
117 | | std::vector<Entry> entries; |
118 | | ThreadData* next; |
119 | | ThreadData* prev; |
120 | | StaticMeta* inst; |
121 | | }; |
122 | | |
123 | | class StaticMeta { |
124 | | public: |
125 | | StaticMeta(); |
126 | | |
127 | | // Return the next available Id |
128 | | uint32_t GetId(); |
129 | | |
130 | | // Return the next available Id without claiming it. |
131 | | uint32_t PeekId() const; |
132 | | |
133 | | // Return next available Ids in the order they will be used without claiming them. |
134 | | std::vector<uint32_t> PeekIds() const; |
135 | | |
136 | | // Return the given Id back to the free pool. This also triggers |
137 | | // UnrefHandler for associated pointer value (if not NULL) for all threads. |
138 | | void ReclaimId(uint32_t id); |
139 | | |
140 | | // Return the pointer value for the given id for the current thread. |
141 | | void* Get(uint32_t id) const; |
142 | | // Reset the pointer value for the given id for the current thread. |
143 | | // It triggers UnrefHandler if the id has an existing pointer value. |
144 | | void Reset(uint32_t id, void* ptr); |
145 | | // Atomically swap the supplied ptr and return the previous value |
146 | | void* Swap(uint32_t id, void* ptr); |
147 | | // Atomically compare and swap the provided value only if it equals |
148 | | // to expected value. |
149 | | bool CompareAndSwap(uint32_t id, void* ptr, void*& expected); |
150 | | // Reset all thread local data to replacement, and return non-nullptr |
151 | | // data for all existing threads |
152 | | void Scrape(uint32_t id, autovector<void*>* ptrs, void* const replacement); |
153 | | |
154 | | // Register the UnrefHandler for id |
155 | | void SetHandler(uint32_t id, UnrefHandler handler); |
156 | | |
157 | | // Initialize all the singletons associated with StaticMeta. |
158 | | // |
159 | | // If this function is not called, then the singletons will be |
160 | | // automatically initialized when they are used. |
161 | | // |
162 | | // Calling this function twice or after the singletons have been |
163 | | // initialized will be a no-op. |
164 | | static void InitSingletons(); |
165 | | |
166 | | // protect inst, next_instance_id_, free_instance_ids_, head_, |
167 | | // ThreadData.entries |
168 | | // |
169 | | // Note that here we prefer function static variable instead of the usual |
170 | | // global static variable. The reason is that C++ destroys static |
171 | | // variables in the reverse order of their construction. |
172 | | // However, C++ does not guarantee any construction order when global |
173 | | // static variables are defined in different files, while the function |
174 | | // static variables are initialized when their function are first called. |
175 | | // As a result, the construction order of the function static variables |
176 | | // can be controlled by properly invoking their first function calls in |
177 | | // the right order. |
178 | | // |
179 | | // For instance, the following function contains a function static |
180 | | // variable. We place a dummy function call of this inside |
181 | | // Env::Default() to ensure the correct construction order of these |
182 | | // static variables. |
183 | | static port::Mutex* Mutex(); |
184 | | |
185 | | // Returns the member mutex of the current StaticMeta. In general, |
186 | | // Mutex() should be used instead of this one. However, in case where |
187 | | // the static variable inside Instance() goes out of scope, MemberMutex() |
188 | | // should be used. One example is OnThreadExit() function. |
189 | 51.7k | port::Mutex* MemberMutex() { return &mutex_; } |
190 | | |
191 | | private: |
192 | | // Get UnrefHandler for id with acquiring mutex |
193 | | // REQUIRES: mutex locked |
194 | | UnrefHandler GetHandler(uint32_t id); |
195 | | |
196 | | // Triggered before a thread terminates |
197 | | static void OnThreadExit(void* ptr); |
198 | | |
199 | | // Add current thread's ThreadData to the global chain |
200 | | // REQUIRES: mutex locked |
201 | | void AddThreadData(ThreadData* d); |
202 | | |
203 | | // Remove current thread's ThreadData from the global chain |
204 | | // REQUIRES: mutex locked |
205 | | void RemoveThreadData(ThreadData* d); |
206 | | |
207 | | static ThreadData* GetThreadLocal(); |
208 | | |
209 | | uint32_t next_instance_id_; |
210 | | // Used to recycle Ids in case ThreadLocalPtr is instantiated and destroyed |
211 | | // frequently. This also prevents it from blowing up the vector space. |
212 | | autovector<uint32_t> free_instance_ids_; |
213 | | // Chain all thread local structure together. This is necessary since |
214 | | // when one ThreadLocalPtr gets destroyed, we need to loop over each |
215 | | // thread's version of pointer corresponding to that instance and |
216 | | // call UnrefHandler for it. |
217 | | ThreadData head_; |
218 | | |
219 | | std::unordered_map<uint32_t, UnrefHandler> handler_map_; |
220 | | |
221 | | // The private mutex. Developers should always use Mutex() instead of |
222 | | // using this variable directly. |
223 | | port::Mutex mutex_; |
224 | | #if ROCKSDB_SUPPORT_THREAD_LOCAL |
225 | | // Thread local storage |
226 | | static __thread ThreadData* tls_; |
227 | | #endif // NOLINT |
228 | | |
229 | | // Used to make thread exit trigger possible if !defined(OS_MACOSX). |
230 | | // Otherwise, used to retrieve thread data. |
231 | | pthread_key_t pthread_key_; |
232 | | }; |
233 | | |
234 | | static StaticMeta* Instance(); |
235 | | |
236 | | const uint32_t id_; |
237 | | }; |
238 | | |
239 | | } // namespace rocksdb |