/Users/deen/code/yugabyte-db/src/yb/util/debug/trace_event_impl.cc
Line | Count | Source |
1 | | // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 | | // |
3 | | // Use of this source code is governed by a BSD-style license that can be |
4 | | // found in the LICENSE file. |
5 | | // |
6 | | // The following only applies to changes made to this file as part of YugaByte development. |
7 | | // |
8 | | // Portions Copyright (c) YugaByte, Inc. |
9 | | // |
10 | | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except |
11 | | // in compliance with the License. You may obtain a copy of the License at |
12 | | // |
13 | | // http://www.apache.org/licenses/LICENSE-2.0 |
14 | | // |
15 | | // Unless required by applicable law or agreed to in writing, software distributed under the License |
16 | | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express |
17 | | // or implied. See the License for the specific language governing permissions and limitations |
18 | | // under the License. |
19 | | // |
20 | | |
21 | | #include "yb/util/debug/trace_event_impl.h" |
22 | | |
23 | | #include <algorithm> |
24 | | #include <list> |
25 | | #include <vector> |
26 | | |
27 | | #include <glog/logging.h> |
28 | | |
29 | | #include "yb/gutil/bind.h" |
30 | | #include "yb/gutil/dynamic_annotations.h" |
31 | | #include "yb/gutil/map-util.h" |
32 | | #include "yb/gutil/mathlimits.h" |
33 | | #include "yb/gutil/singleton.h" |
34 | | #include "yb/gutil/stl_util.h" |
35 | | #include "yb/gutil/stringprintf.h" |
36 | | #include "yb/gutil/strings/join.h" |
37 | | #include "yb/gutil/strings/split.h" |
38 | | #include "yb/gutil/strings/util.h" |
39 | | #include "yb/gutil/sysinfo.h" |
40 | | #include "yb/gutil/walltime.h" |
41 | | |
42 | | #include "yb/util/atomic.h" |
43 | | #include "yb/util/debug/trace_event.h" |
44 | | #include "yb/util/debug/trace_event_synthetic_delay.h" |
45 | | #include "yb/util/flag_tags.h" |
46 | | #include "yb/util/thread.h" |
47 | | |
48 | | DEFINE_string(trace_to_console, "", |
49 | | "Trace pattern specifying which trace events should be dumped " |
50 | | "directly to the console"); |
51 | | TAG_FLAG(trace_to_console, experimental); |
52 | | |
53 | | // The thread buckets for the sampling profiler. |
54 | | BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3]; |
55 | | |
56 | | namespace yb { |
57 | | namespace debug { |
58 | | |
59 | | using base::SpinLockHolder; |
60 | | |
61 | | using strings::SubstituteAndAppend; |
62 | | using std::string; |
63 | | |
64 | | // This is used to avoid generating trace events during global initialization. If we allow that to |
65 | | // happen, it may lead to segfaults (https://github.com/yugabyte/yugabyte-db/issues/11033). |
66 | | std::atomic<bool> trace_events_enabled{false}; |
67 | | |
68 | | __thread TraceLog::PerThreadInfo* TraceLog::thread_local_info_ = nullptr; |
69 | | |
70 | | namespace { |
71 | | |
72 | | // Controls the number of trace events we will buffer in-memory |
73 | | // before throwing them away. |
74 | | const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize; |
75 | | const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize; |
76 | | const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4; |
77 | | const size_t kTraceEventBatchChunks = 1000 / kTraceBufferChunkSize; |
79 | | // Can store results for 30 seconds with a 1 ms sampling interval. |
79 | | const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize; |
80 | | // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events. |
81 | | const size_t kEchoToConsoleTraceEventBufferChunks = 256; |
82 | | |
83 | | const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY("; |
84 | | |
85 | 1.76M | #define MAX_CATEGORY_GROUPS 100 |
86 | | |
87 | | // Parallel arrays g_category_groups and g_category_group_enabled are separate |
88 | | // so that a pointer to a member of g_category_group_enabled can be easily |
89 | | // converted to an index into g_category_groups. This allows macros to deal |
90 | | // only with char enabled pointers from g_category_group_enabled, and we can |
91 | | // convert internally to determine the category name from the char enabled |
92 | | // pointer. |
93 | | const char* g_category_groups[MAX_CATEGORY_GROUPS] = { |
94 | | "toplevel", |
95 | | "tracing already shutdown", |
96 | | "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS", |
97 | | "__metadata"}; |
98 | | |
99 | | // The enabled flag is char instead of bool so that the API can be used from C. |
100 | | unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 }; |
101 | | // Indexes here have to match the g_category_groups array indexes above. |
102 | | const int g_category_already_shutdown = 1; |
103 | | const int g_category_categories_exhausted = 2; |
104 | | const int g_category_metadata = 3; |
105 | | const int g_num_builtin_categories = 4; |
106 | | // Skip default categories. |
107 | | AtomicWord g_category_index = g_num_builtin_categories; |
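Because the two arrays are index-aligned, an enabled-flag pointer converts back to its category name with a single pointer subtraction. A sketch of that arithmetic follows; the helper name is hypothetical, and GetCategoryGroupName() further below is the real implementation:

    // Sketch only: recover the category name for an enabled-flag pointer.
    inline const char* NameForEnabledFlag(const unsigned char* flag) {
      size_t index = flag - g_category_group_enabled;  // parallel-array index
      return g_category_groups[index];
    }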
108 | | |
109 | | // The name of the current thread. This is used to decide if the current |
110 | | // thread name has changed. We combine all the seen thread names into the |
111 | | // output name for the thread. |
112 | | __thread const char* g_current_thread_name = ""; |
113 | | |
114 | 0 | static void NOTIMPLEMENTED() { |
115 | 0 | LOG(FATAL); |
116 | 0 | } |
117 | | |
118 | | class TraceBufferRingBuffer : public TraceBuffer { |
119 | | public: |
120 | | explicit TraceBufferRingBuffer(size_t max_chunks) |
121 | | : max_chunks_(max_chunks), |
122 | | recyclable_chunks_queue_(new size_t[queue_capacity()]), |
123 | | queue_head_(0), |
124 | | queue_tail_(max_chunks), |
125 | | current_iteration_index_(0), |
126 | 18 | current_chunk_seq_(1) { |
127 | 18 | chunks_.reserve(max_chunks); |
128 | 17.2k | for (size_t i = 0; i < max_chunks; ++i) |
129 | 17.2k | recyclable_chunks_queue_[i] = i; |
130 | 18 | } |
131 | | |
132 | 10 | ~TraceBufferRingBuffer() { |
133 | 10 | STLDeleteElements(&chunks_); |
134 | 10 | } |
135 | | |
136 | 928 | std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override { |
137 | | // Because the number of threads is much less than the number of chunks, |
138 | | // the queue should never be empty. |
139 | 928 | DCHECK(!QueueIsEmpty()); |
140 | | |
141 | 928 | *index = recyclable_chunks_queue_[queue_head_]; |
142 | 928 | queue_head_ = NextQueueIndex(queue_head_); |
143 | 928 | current_iteration_index_ = queue_head_; |
144 | | |
145 | 928 | if (*index >= chunks_.size()) |
146 | 928 | chunks_.resize(*index + 1); |
147 | | |
148 | 928 | TraceBufferChunk* chunk = chunks_[*index]; |
149 | 928 | chunks_[*index] = nullptr; // Put nullptr in the slot of an in-flight chunk. |
150 | 928 | if (chunk) |
151 | 0 | chunk->Reset(current_chunk_seq_++); |
152 | 928 | else |
153 | 928 | chunk = new TraceBufferChunk(current_chunk_seq_++); |
154 | | |
155 | 928 | return std::unique_ptr<TraceBufferChunk>(chunk); |
156 | 928 | } |
157 | | |
158 | | virtual void ReturnChunk(size_t index, |
159 | 926 | std::unique_ptr<TraceBufferChunk> chunk) override { |
160 | | // When this method is called, the queue should not be full because it |
161 | | // can contain all chunks including the one to be returned. |
162 | 926 | DCHECK(!QueueIsFull()); |
163 | 926 | DCHECK(chunk); |
164 | 926 | DCHECK_LT(index, chunks_.size()); |
165 | 926 | DCHECK(!chunks_[index]); |
166 | 926 | chunks_[index] = chunk.release(); |
167 | 926 | recyclable_chunks_queue_[queue_tail_] = index; |
168 | 926 | queue_tail_ = NextQueueIndex(queue_tail_); |
169 | 926 | } |
170 | | |
171 | 917 | bool IsFull() const override { |
172 | 917 | return false; |
173 | 917 | } |
174 | | |
175 | 0 | size_t Size() const override { |
176 | | // This is approximate because not all of the chunks are full. |
177 | 0 | return chunks_.size() * kTraceBufferChunkSize; |
178 | 0 | } |
179 | | |
180 | 0 | size_t Capacity() const override { |
181 | 0 | return max_chunks_ * kTraceBufferChunkSize; |
182 | 0 | } |
183 | | |
184 | 1 | TraceEvent* GetEventByHandle(TraceEventHandle handle) override { |
185 | 1 | if (handle.chunk_index >= chunks_.size()) |
186 | 0 | return nullptr; |
187 | 1 | TraceBufferChunk* chunk = chunks_[handle.chunk_index]; |
188 | 1 | if (!chunk || chunk->seq() != handle.chunk_seq) |
189 | 0 | return nullptr; |
190 | 1 | return chunk->GetEventAt(handle.event_index); |
191 | 1 | } |
192 | | |
193 | 936 | const TraceBufferChunk* NextChunk() override { |
194 | 936 | if (chunks_.empty()) |
195 | 0 | return nullptr; |
196 | | |
197 | 10.0k | while (current_iteration_index_ != queue_tail_) { |
198 | 10.0k | size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_]; |
199 | 10.0k | current_iteration_index_ = NextQueueIndex(current_iteration_index_); |
200 | 10.0k | if (chunk_index >= chunks_.size()) // Skip uninitialized chunks. |
201 | 9.07k | continue; |
202 | 926 | DCHECK(chunks_[chunk_index]); |
203 | 926 | return chunks_[chunk_index]; |
204 | 926 | } |
205 | 10 | return nullptr; |
206 | 936 | } |
207 | | |
208 | 0 | std::unique_ptr<TraceBuffer> CloneForIteration() const override { |
209 | 0 | std::unique_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer()); |
210 | 0 | for (size_t queue_index = queue_head_; queue_index != queue_tail_; |
211 | 0 | queue_index = NextQueueIndex(queue_index)) { |
212 | 0 | size_t chunk_index = recyclable_chunks_queue_[queue_index]; |
213 | 0 | if (chunk_index >= chunks_.size()) // Skip uninitialized chunks. |
214 | 0 | continue; |
215 | 0 | TraceBufferChunk* chunk = chunks_[chunk_index]; |
216 | 0 | cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : nullptr); |
217 | 0 | } |
218 | 0 | return cloned_buffer; |
219 | 0 | } |
220 | | |
221 | | private: |
222 | | class ClonedTraceBuffer : public TraceBuffer { |
223 | | public: |
224 | 0 | ClonedTraceBuffer() : current_iteration_index_(0) {} |
225 | 0 | ~ClonedTraceBuffer() { |
226 | 0 | STLDeleteElements(&chunks_); |
227 | 0 | } |
228 | | |
229 | | // The only implemented method. |
230 | 0 | const TraceBufferChunk* NextChunk() override { |
231 | 0 | return current_iteration_index_ < chunks_.size() ? |
232 | 0 | chunks_[current_iteration_index_++] : nullptr; |
233 | 0 | } |
234 | | |
235 | 0 | std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override { |
236 | 0 | NOTIMPLEMENTED(); |
237 | 0 | return std::unique_ptr<TraceBufferChunk>(); |
238 | 0 | } |
239 | | virtual void ReturnChunk(size_t index, |
240 | 0 | std::unique_ptr<TraceBufferChunk>) override { |
241 | 0 | NOTIMPLEMENTED(); |
242 | 0 | } |
243 | 0 | bool IsFull() const override { return false; } |
244 | 0 | size_t Size() const override { return 0; } |
245 | 0 | size_t Capacity() const override { return 0; } |
246 | 0 | TraceEvent* GetEventByHandle(TraceEventHandle handle) override { |
247 | 0 | return nullptr; |
248 | 0 | } |
249 | 0 | std::unique_ptr<TraceBuffer> CloneForIteration() const override { |
250 | 0 | NOTIMPLEMENTED(); |
251 | 0 | return std::unique_ptr<TraceBuffer>(); |
252 | 0 | } |
253 | | |
254 | | size_t current_iteration_index_; |
255 | | vector<TraceBufferChunk*> chunks_; |
256 | | }; |
257 | | |
258 | 928 | bool QueueIsEmpty() const { |
259 | 928 | return queue_head_ == queue_tail_; |
260 | 928 | } |
261 | | |
262 | 926 | size_t QueueSize() const { |
263 | 10 | return queue_tail_ > queue_head_ ? queue_tail_ - queue_head_ : |
264 | 916 | queue_tail_ + queue_capacity() - queue_head_; |
265 | 926 | } |
266 | | |
267 | 926 | bool QueueIsFull() const { |
268 | 926 | return QueueSize() == queue_capacity() - 1; |
269 | 926 | } |
270 | | |
271 | 13.7k | size_t queue_capacity() const { |
272 | | // One extra slot to distinguish the full state from the empty state. |
273 | 13.7k | return max_chunks_ + 1; |
274 | 13.7k | } |
275 | | |
276 | 11.8k | size_t NextQueueIndex(size_t index) const { |
277 | 11.8k | index++; |
278 | 11.8k | if (index >= queue_capacity()) |
279 | 20 | index = 0; |
280 | 11.8k | return index; |
281 | 11.8k | } |
282 | | |
283 | | size_t max_chunks_; |
284 | | vector<TraceBufferChunk*> chunks_; |
285 | | |
286 | | std::unique_ptr<size_t[]> recyclable_chunks_queue_; |
287 | | size_t queue_head_; |
288 | | size_t queue_tail_; |
289 | | |
290 | | size_t current_iteration_index_; |
291 | | uint32 current_chunk_seq_; |
292 | | |
293 | | DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer); |
294 | | }; |
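The index queue above relies on the classic one-spare-slot trick noted in queue_capacity(): with max_chunks_ + 1 slots, head == tail can only ever mean "empty", never "full". A self-contained sketch of the same invariant, with hypothetical names, for illustration only:

    // Minimal sketch of a one-spare-slot circular index queue (not the class
    // above; same invariant). Holds at most max_items values.
    #include <cstddef>
    #include <vector>

    class IndexQueue {
     public:
      explicit IndexQueue(size_t max_items)
          : slots_(max_items + 1), head_(0), tail_(0) {}  // one spare slot

      bool Empty() const { return head_ == tail_; }
      // Full when advancing tail_ would land on head_; the spare slot
      // guarantees this never coincides with the empty condition.
      bool Full() const { return Next(tail_) == head_; }

      void Push(size_t v) { slots_[tail_] = v; tail_ = Next(tail_); }
      size_t Pop() { size_t v = slots_[head_]; head_ = Next(head_); return v; }

     private:
      size_t Next(size_t i) const { return (i + 1) % slots_.size(); }

      std::vector<size_t> slots_;
      size_t head_;
      size_t tail_;
    };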
295 | | |
296 | | class TraceBufferVector : public TraceBuffer { |
297 | | public: |
298 | | TraceBufferVector() |
299 | | : in_flight_chunk_count_(0), |
300 | 16.2k | current_iteration_index_(0) { |
301 | 16.2k | chunks_.reserve(kTraceEventVectorBufferChunks); |
302 | 16.2k | } |
303 | 22 | ~TraceBufferVector() { |
304 | 22 | STLDeleteElements(&chunks_); |
305 | 22 | } |
306 | | |
307 | 4.01k | std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override { |
308 | | // This function may be called when adding normal events or indirectly from |
309 | | // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we |
310 | | // have to add the metadata events and flush thread-local buffers even if |
311 | | // the buffer is full. |
312 | 4.01k | *index = chunks_.size(); |
313 | 4.01k | chunks_.push_back(nullptr); // Put nullptr in the slot of an in-flight chunk. |
314 | 4.01k | ++in_flight_chunk_count_; |
315 | | // + 1 because zero chunk_seq is not allowed. |
316 | 4.01k | return std::unique_ptr<TraceBufferChunk>( |
317 | 4.01k | new TraceBufferChunk(static_cast<uint32>(*index) + 1)); |
318 | 4.01k | } |
319 | | |
320 | | virtual void ReturnChunk(size_t index, |
321 | 4.01k | std::unique_ptr<TraceBufferChunk> chunk) override { |
322 | 4.01k | DCHECK_GT(in_flight_chunk_count_, 0u); |
323 | 4.01k | DCHECK_LT(index, chunks_.size()); |
324 | 4.01k | DCHECK(!chunks_[index]); |
325 | 4.01k | --in_flight_chunk_count_; |
326 | 4.01k | chunks_[index] = chunk.release(); |
327 | 4.01k | } |
328 | | |
329 | 259k | bool IsFull() const override { |
330 | 259k | return chunks_.size() >= kTraceEventVectorBufferChunks; |
331 | 259k | } |
332 | | |
333 | 1 | size_t Size() const override { |
334 | | // This is approximate because not all of the chunks are full. |
335 | 1 | return chunks_.size() * kTraceBufferChunkSize; |
336 | 1 | } |
337 | | |
338 | 1 | size_t Capacity() const override { |
339 | 1 | return kTraceEventVectorBufferChunks * kTraceBufferChunkSize; |
340 | 1 | } |
341 | | |
342 | 0 | TraceEvent* GetEventByHandle(TraceEventHandle handle) override { |
343 | 0 | if (handle.chunk_index >= chunks_.size()) |
344 | 0 | return nullptr; |
345 | 0 | TraceBufferChunk* chunk = chunks_[handle.chunk_index]; |
346 | 0 | if (!chunk || chunk->seq() != handle.chunk_seq) |
347 | 0 | return nullptr; |
348 | 0 | return chunk->GetEventAt(handle.event_index); |
349 | 0 | } |
350 | | |
351 | 4.02k | const TraceBufferChunk* NextChunk() override { |
352 | 4.02k | while (current_iteration_index_ < chunks_.size()) { |
353 | | // Skip in-flight chunks. |
354 | 4.01k | const TraceBufferChunk* chunk = chunks_[current_iteration_index_++]; |
355 | 4.01k | if (chunk) |
356 | 4.01k | return chunk; |
357 | 4.01k | } |
358 | 13 | return nullptr; |
359 | 4.02k | } |
360 | | |
361 | 0 | std::unique_ptr<TraceBuffer> CloneForIteration() const override { |
362 | 0 | NOTIMPLEMENTED(); |
363 | 0 | return std::unique_ptr<TraceBuffer>(); |
364 | 0 | } |
365 | | |
366 | | private: |
367 | | size_t in_flight_chunk_count_; |
368 | | size_t current_iteration_index_; |
369 | | vector<TraceBufferChunk*> chunks_; |
370 | | |
371 | | DISALLOW_COPY_AND_ASSIGN(TraceBufferVector); |
372 | | }; |
373 | | |
374 | | template <typename T> |
375 | | void InitializeMetadataEvent(TraceEvent* trace_event, |
376 | | int64_t thread_id, |
377 | | const char* metadata_name, const char* arg_name, |
378 | 28 | const T& value) { |
379 | 28 | if (!trace_event) |
380 | 0 | return; |
381 | | |
382 | 28 | int num_args = 1; |
383 | 28 | unsigned char arg_type; |
384 | 28 | uint64_t arg_value; |
385 | 28 | ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value); |
386 | 28 | trace_event->Initialize(thread_id, |
387 | 28 | MicrosecondsInt64(0), MicrosecondsInt64(0), TRACE_EVENT_PHASE_METADATA, |
388 | 28 | &g_category_group_enabled[g_category_metadata], |
389 | 28 | metadata_name, ::trace_event_internal::kNoEventId, |
390 | 28 | num_args, &arg_name, &arg_type, &arg_value, nullptr, |
391 | 28 | TRACE_EVENT_FLAG_NONE); |
392 | 28 | } |
------------------
| trace_event_impl.cc:_ZN2yb5debug12_GLOBAL__N_123InitializeMetadataEventIiEEvPNS0_10TraceEventExPKcS6_RKT_:
|  Line | Count | Source
|   378 |    18 | const T& value) {
|   379 |    18 | if (!trace_event)
|   380 |     0 | return;
|   381 |       |
|   382 |    18 | int num_args = 1;
|   383 |    18 | unsigned char arg_type;
|   384 |    18 | uint64_t arg_value;
|   385 |    18 | ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
|   386 |    18 | trace_event->Initialize(thread_id,
|   387 |    18 | MicrosecondsInt64(0), MicrosecondsInt64(0), TRACE_EVENT_PHASE_METADATA,
|   388 |    18 | &g_category_group_enabled[g_category_metadata],
|   389 |    18 | metadata_name, ::trace_event_internal::kNoEventId,
|   390 |    18 | num_args, &arg_name, &arg_type, &arg_value, nullptr,
|   391 |    18 | TRACE_EVENT_FLAG_NONE);
|   392 |    18 | }
------------------
| trace_event_impl.cc:_ZN2yb5debug12_GLOBAL__N_123InitializeMetadataEventINSt3__112basic_stringIcNS3_11char_traitsIcEENS3_9allocatorIcEEEEEEvPNS0_10TraceEventExPKcSD_RKT_:
|  Line | Count | Source
|   378 |    10 | const T& value) {
|   379 |    10 | if (!trace_event)
|   380 |     0 | return;
|   381 |       |
|   382 |    10 | int num_args = 1;
|   383 |    10 | unsigned char arg_type;
|   384 |    10 | uint64_t arg_value;
|   385 |    10 | ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
|   386 |    10 | trace_event->Initialize(thread_id,
|   387 |    10 | MicrosecondsInt64(0), MicrosecondsInt64(0), TRACE_EVENT_PHASE_METADATA,
|   388 |    10 | &g_category_group_enabled[g_category_metadata],
|   389 |    10 | metadata_name, ::trace_event_internal::kNoEventId,
|   390 |    10 | num_args, &arg_name, &arg_type, &arg_value, nullptr,
|   391 |    10 | TRACE_EVENT_FLAG_NONE);
|   392 |    10 | }
------------------
393 | | |
394 | | // RAII object which marks '*dst' with a non-zero value while in scope. |
395 | | // This assumes that no other threads write to '*dst'. |
396 | | class MarkFlagInScope { |
397 | | public: |
398 | | explicit MarkFlagInScope(Atomic32* dst) |
399 | 362k | : dst_(dst) { |
400 | | // We currently use Acquire_AtomicExchange here because it appears |
401 | | // to be the cheapest way of getting an "Acquire_Store" barrier. Actually |
402 | | // using Acquire_Store generates more assembly instructions and benchmarks |
403 | | // slightly slower. |
404 | | // |
405 | | // TODO: it would be even faster to avoid the memory barrier here entirely, |
406 | | // and do an asymmetric barrier, for example by having the flusher thread |
407 | | // send a signal to every registered thread, or wait until every other thread |
408 | | // has experienced at least one context switch. A number of options for this |
409 | | // are outlined in: |
410 | | // http://home.comcast.net/~pjbishop/Dave/Asymmetric-Dekker-Synchronization.txt |
411 | 362k | Atomic32 old_val = base::subtle::Acquire_AtomicExchange(dst_, 1); |
412 | 362k | DCHECK_EQ(old_val, 0); |
413 | 362k | } |
414 | 360k | ~MarkFlagInScope() { |
415 | 360k | base::subtle::Release_Store(dst_, 0); |
416 | 360k | } |
417 | | |
418 | | private: |
419 | | Atomic32* dst_; |
420 | | DISALLOW_COPY_AND_ASSIGN(MarkFlagInScope); |
421 | | }; |
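As a usage illustration, MarkFlagInScope publishes "this thread is mid-write" for the duration of a scope. The flag name and surrounding function here are hypothetical; only the RAII pattern is taken from the class above:

    // Sketch only: a flusher thread would wait until it observes 0 in
    // 'writing_flag' before touching this thread's buffer.
    Atomic32 writing_flag = 0;

    void WriteTraceEventSketch() {
      MarkFlagInScope mark(&writing_flag);  // acquire-exchanges the flag to 1
      // ... append to the thread-local chunk ...
    }  // destructor release-stores 0, publishing the writes above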
422 | | } // anonymous namespace |
423 | | |
424 | 26 | TraceLog::ThreadLocalEventBuffer* TraceLog::PerThreadInfo::AtomicTakeBuffer() { |
425 | 26 | return reinterpret_cast<TraceLog::ThreadLocalEventBuffer*>( |
426 | 26 | base::subtle::Acquire_AtomicExchange( |
427 | 26 | reinterpret_cast<AtomicWord*>(&event_buffer_), |
428 | 26 | 0)); |
429 | 26 | } |
430 | | |
431 | 0 | void TraceBufferChunk::Reset(uint32 new_seq) { |
432 | 0 | for (size_t i = 0; i < next_free_; ++i) |
433 | 0 | chunk_[i].Reset(); |
434 | 0 | next_free_ = 0; |
435 | 0 | seq_ = new_seq; |
436 | 0 | } |
437 | | |
438 | 308k | TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) { |
439 | 308k | DCHECK(!IsFull()); |
440 | 308k | *event_index = next_free_++; |
441 | 308k | return &chunk_[*event_index]; |
442 | 308k | } |
443 | | |
444 | 0 | std::unique_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const { |
445 | 0 | std::unique_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_)); |
446 | 0 | cloned_chunk->next_free_ = next_free_; |
447 | 0 | for (size_t i = 0; i < next_free_; ++i) |
448 | 0 | cloned_chunk->chunk_[i].CopyFrom(chunk_[i]); |
449 | 0 | return cloned_chunk; |
450 | 0 | } |
451 | | |
452 | | // A helper class that allows the lock to be acquired in the middle of the scope |
453 | | // and unlocks at the end of the scope if it was locked. |
454 | | class TraceLog::OptionalAutoLock { |
455 | | public: |
456 | | explicit OptionalAutoLock(base::SpinLock* lock) |
457 | | : lock_(lock), |
458 | 53.4k | locked_(false) { |
459 | 53.4k | } |
460 | | |
461 | 51.6k | ~OptionalAutoLock() { |
462 | 51.6k | if (locked_) |
463 | 1 | lock_->Unlock(); |
464 | 51.6k | } |
465 | | |
466 | 1 | void EnsureAcquired() ACQUIRE() { |
467 | 1 | if (!locked_) { |
468 | 1 | lock_->Lock(); |
469 | 1 | locked_ = true; |
470 | 1 | } |
471 | 1 | } |
472 | | |
473 | | private: |
474 | | base::SpinLock* lock_; |
475 | | bool locked_; |
476 | | DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock); |
477 | | }; |
478 | | |
479 | | // Use this function instead of TraceEventHandle constructor to keep the |
480 | | // overhead of the ScopedTracer (trace_event.h) constructor to a minimum. |
481 | | void MakeHandle(uint32 chunk_seq, size_t chunk_index, size_t event_index, |
482 | 308k | TraceEventHandle* handle) { |
483 | 308k | DCHECK(chunk_seq); |
484 | 308k | DCHECK(chunk_index < (1u << 16)); |
485 | 308k | DCHECK(event_index < (1u << 16)); |
486 | 308k | handle->chunk_seq = chunk_seq; |
487 | 308k | handle->chunk_index = static_cast<uint16>(chunk_index); |
488 | 308k | handle->event_index = static_cast<uint16>(event_index); |
489 | 308k | } |
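To make the handle lifecycle concrete, here is a hypothetical round trip; the validation described in the comments is exactly what GetEventByHandle() performs in the buffers above:

    // Sketch only: pack a location into a handle, then resolve it later.
    TraceEventHandle handle;
    MakeHandle(/* chunk_seq = */ 7, /* chunk_index = */ 3, /* event_index = */ 12,
               &handle);
    // Resolution succeeds only while chunks_[3] still holds a chunk whose
    // seq() is 7. A recycled chunk gets a fresh seq from current_chunk_seq_++,
    // so a stale handle resolves to nullptr instead of aliasing a new event.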
490 | | |
491 | | //////////////////////////////////////////////////////////////////////////////// |
492 | | // |
493 | | // TraceEvent |
494 | | // |
495 | | //////////////////////////////////////////////////////////////////////////////// |
496 | | |
497 | | namespace { |
498 | | |
499 | 13 | size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; } |
500 | | |
501 | | // Copies |*member| into |*buffer|, sets |*member| to point to this new |
502 | | // location, and then advances |*buffer| by the amount written. |
503 | | void CopyTraceEventParameter(char** buffer, |
504 | | const char** member, |
505 | 13 | const char* end) { |
506 | 13 | if (*member) { |
507 | 13 | size_t written = strings::strlcpy(*buffer, *member, end - *buffer) + 1; |
508 | 13 | DCHECK_LE(static_cast<int>(written), end - *buffer); |
509 | 13 | *member = *buffer; |
510 | 13 | *buffer += written; |
511 | 13 | } |
512 | 13 | } |
513 | | |
514 | | } // namespace |
515 | | |
516 | | TraceEvent::TraceEvent() |
517 | | : duration_(-1), |
518 | | thread_duration_(-1), |
519 | | id_(0u), |
520 | | category_group_enabled_(nullptr), |
521 | | name_(nullptr), |
522 | | thread_id_(0), |
523 | | phase_(TRACE_EVENT_PHASE_BEGIN), |
524 | 316k | flags_(0) { |
525 | 632k | for (auto& arg_name : arg_names_) { |
526 | 632k | arg_name = nullptr; |
527 | 632k | } |
528 | 316k | memset(arg_values_, 0, sizeof(arg_values_)); |
529 | 316k | } |
530 | | |
531 | 316k | TraceEvent::~TraceEvent() { |
532 | 316k | } |
533 | | |
534 | 0 | void TraceEvent::CopyFrom(const TraceEvent& other) { |
535 | 0 | timestamp_ = other.timestamp_; |
536 | 0 | thread_timestamp_ = other.thread_timestamp_; |
537 | 0 | duration_ = other.duration_; |
538 | 0 | id_ = other.id_; |
539 | 0 | category_group_enabled_ = other.category_group_enabled_; |
540 | 0 | name_ = other.name_; |
541 | 0 | thread_id_ = other.thread_id_; |
542 | 0 | phase_ = other.phase_; |
543 | 0 | flags_ = other.flags_; |
544 | 0 | parameter_copy_storage_ = other.parameter_copy_storage_; |
545 | |
546 | 0 | for (int i = 0; i < kTraceMaxNumArgs; ++i) { |
547 | 0 | arg_names_[i] = other.arg_names_[i]; |
548 | 0 | arg_types_[i] = other.arg_types_[i]; |
549 | 0 | arg_values_[i] = other.arg_values_[i]; |
550 | 0 | convertable_values_[i] = other.convertable_values_[i]; |
551 | 0 | } |
552 | 0 | } |
553 | | |
554 | | void TraceEvent::Initialize( |
555 | | int64_t thread_id, |
556 | | MicrosecondsInt64 timestamp, |
557 | | MicrosecondsInt64 thread_timestamp, |
558 | | char phase, |
559 | | const unsigned char* category_group_enabled, |
560 | | const char* name, |
561 | | uint64_t id, |
562 | | int num_args, |
563 | | const char** arg_names, |
564 | | const unsigned char* arg_types, |
565 | | const uint64_t* arg_values, |
566 | | const scoped_refptr<ConvertableToTraceFormat>* convertable_values, |
567 | 310k | unsigned char flags) { |
568 | 310k | timestamp_ = timestamp; |
569 | 310k | thread_timestamp_ = thread_timestamp; |
570 | 310k | duration_ = -1; |
571 | 310k | id_ = id; |
572 | 310k | category_group_enabled_ = category_group_enabled; |
573 | 310k | name_ = name; |
574 | 310k | thread_id_ = thread_id; |
575 | 310k | phase_ = phase; |
576 | 310k | flags_ = flags; |
577 | | |
578 | | // Clamp num_args since it may have been set by a third_party library. |
579 | 310k | num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args; |
580 | 310k | int i = 0; |
581 | 347k | for (; i < num_args; ++i) { |
582 | 36.5k | arg_names_[i] = arg_names[i]; |
583 | 36.5k | arg_types_[i] = arg_types[i]; |
584 | | |
585 | 36.5k | if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE) |
586 | 0 | convertable_values_[i] = convertable_values[i]; |
587 | 36.5k | else |
588 | 36.5k | arg_values_[i].as_uint = arg_values[i]; |
589 | 36.5k | } |
590 | 895k | for (; i < kTraceMaxNumArgs; ++i) { |
591 | 584k | arg_names_[i] = nullptr; |
592 | 584k | arg_values_[i].as_uint = 0u; |
593 | 584k | convertable_values_[i] = nullptr; |
594 | 584k | arg_types_[i] = TRACE_VALUE_TYPE_UINT; |
595 | 584k | } |
596 | | |
597 | 310k | bool copy = !!(flags & TRACE_EVENT_FLAG_COPY); |
598 | 310k | size_t alloc_size = 0; |
599 | 310k | if (copy) { |
600 | 0 | alloc_size += GetAllocLength(name); |
601 | 0 | for (i = 0; i < num_args; ++i) { |
602 | 0 | alloc_size += GetAllocLength(arg_names_[i]); |
603 | 0 | if (arg_types_[i] == TRACE_VALUE_TYPE_STRING) |
604 | 0 | arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING; |
605 | 0 | } |
606 | 0 | } |
607 | | |
608 | 310k | bool arg_is_copy[kTraceMaxNumArgs]; |
609 | 347k | for (i = 0; i < num_args; ++i) { |
610 | | // No copying of convertible types, we retain ownership. |
611 | 37.0k | if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE) { |
612 | 0 | arg_is_copy[i] = false; // Without this, clang analyzer complains below. |
613 | 0 | continue; |
614 | 0 | } |
615 | | |
616 | | // We only take a copy of arg_vals if they are of type COPY_STRING. |
617 | 37.0k | arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING); |
618 | 37.0k | if (arg_is_copy[i]) |
619 | 13 | alloc_size += GetAllocLength(arg_values_[i].as_string); |
620 | 37.0k | } |
621 | | |
622 | 310k | if (alloc_size) { |
623 | 13 | parameter_copy_storage_ = new RefCountedString; |
624 | 13 | parameter_copy_storage_->data().resize(alloc_size); |
625 | 13 | char* ptr = string_as_array(¶meter_copy_storage_->data()); |
626 | 13 | const char* end = ptr + alloc_size; |
627 | 13 | if (copy) { |
628 | 0 | CopyTraceEventParameter(&ptr, &name_, end); |
629 | 0 | for (i = 0; i < num_args; ++i) { |
630 | 0 | CopyTraceEventParameter(&ptr, &arg_names_[i], end); |
631 | 0 | } |
632 | 0 | } |
633 | 26 | for (i = 0; i < num_args; ++i) { |
634 | 13 | if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE) |
635 | 0 | continue; |
636 | | // Without the assignment to arg_is_copy[i] before the continue statement above, the |
637 | | // clang analyzer reports the following here: |
638 | | // warning: Branch condition evaluates to a garbage value |
639 | 13 | if (arg_is_copy[i]) { |
640 | 13 | CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end); |
641 | 13 | } |
642 | 13 | } |
643 | 0 | DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end; |
644 | 13 | } |
645 | 310k | } |
646 | | |
647 | 0 | void TraceEvent::Reset() { |
648 | | // Only reset fields that won't be initialized in Initialize(), or that may |
649 | | // hold references to other objects. |
650 | 0 | duration_ = -1; |
651 | 0 | parameter_copy_storage_ = nullptr; |
652 | 0 | for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) |
653 | 0 | convertable_values_[i] = nullptr; |
654 | 0 | } |
655 | | |
656 | | void TraceEvent::UpdateDuration(const MicrosecondsInt64& now, |
657 | 52.0k | const MicrosecondsInt64& thread_now) { |
658 | 52.0k | DCHECK_EQ(duration_, -1); |
659 | 52.0k | duration_ = now - timestamp_; |
660 | 52.0k | thread_duration_ = thread_now - thread_timestamp_; |
661 | 52.0k | } |
662 | | |
663 | | namespace { |
664 | | // Escape the given string using JSON rules. |
665 | 14 | void JsonEscape(GStringPiece s, string* out) { |
666 | 14 | out->reserve(out->size() + s.size() * 2); |
667 | 14 | const char* p_end = s.data() + s.size(); |
668 | 400 | for (const char* p = s.data(); p != p_end; p++) { |
669 | | // Only the following characters need to be escaped, according to json.org. |
670 | | // In particular, it's illegal to escape the single-quote character, and |
671 | | // JSON does not support the "\x" escape sequence like C/Java. |
672 | 386 | switch (*p) { |
673 | 2 | case '"': |
674 | 2 | case '\\': |
675 | 2 | out->push_back('\\'); |
676 | 2 | out->push_back(*p); |
677 | 2 | break; |
678 | 0 | case '\b': |
679 | 0 | out->append("\\b"); |
680 | 0 | break; |
681 | 0 | case '\f': |
682 | 0 | out->append("\\f"); |
683 | 0 | break; |
684 | 1 | case '\n': |
685 | 1 | out->append("\\n"); |
686 | | // The following break statement was missing in the original Kudu code, most likely a bug. |
687 | 1 | break; |
688 | 0 | case '\r': |
689 | 0 | out->append("\\r"); |
690 | 0 | break; |
691 | 0 | case '\t': |
692 | 0 | out->append("\\t"); |
693 | 0 | break; |
694 | 383 | default: |
695 | 383 | out->push_back(*p); |
696 | 386 | } |
697 | 386 | } |
698 | 14 | } |
699 | | } // anonymous namespace |
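A quick worked example of JsonEscape(); the characters shown in the comment are the actual output bytes, not C++ literal notation:

    std::string escaped;
    JsonEscape("say \"hi\"\n", &escaped);
    // escaped now contains: say \"hi\"\n
    // The quote gains a backslash and the newline byte becomes the two
    // characters '\' 'n', per the switch above; all other bytes copy through.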
700 | | |
701 | | // static |
702 | | void TraceEvent::AppendValueAsJSON(unsigned char type, |
703 | | TraceEvent::TraceValue value, |
704 | 40.0k | std::string* out) { |
705 | 40.0k | switch (type) { |
706 | 0 | case TRACE_VALUE_TYPE_BOOL: |
707 | 0 | *out += value.as_bool ? "true" : "false"; |
708 | 0 | break; |
709 | 0 | case TRACE_VALUE_TYPE_UINT: |
710 | 0 | SubstituteAndAppend(out, "$0", static_cast<uint64>(value.as_uint)); |
711 | 0 | break; |
712 | 40.0k | case TRACE_VALUE_TYPE_INT: |
713 | 40.0k | SubstituteAndAppend(out, "$0", static_cast<int64>(value.as_int)); |
714 | 40.0k | break; |
715 | 0 | case TRACE_VALUE_TYPE_DOUBLE: { |
716 | | // FIXME: base/json/json_writer.cc is using the same code, |
717 | | // should be made into a common method. |
718 | 0 | std::string real; |
719 | 0 | double val = value.as_double; |
720 | 0 | if (MathLimits<double>::IsFinite(val)) { |
721 | 0 | real = strings::Substitute("$0", val); |
722 | | // Ensure that the number has a .0 if there's no decimal or 'e'. This |
723 | | // makes sure that when we read the JSON back, it's interpreted as a |
724 | | // real rather than an int. |
725 | 0 | if (real.find('.') == std::string::npos && |
726 | 0 | real.find('e') == std::string::npos && |
727 | 0 | real.find('E') == std::string::npos) { |
728 | 0 | real.append(".0"); |
729 | 0 | } |
730 | | // The JSON spec requires that non-integer values in the range (-1,1) |
731 | | // have a zero before the decimal point - ".52" is not valid, "0.52" is. |
732 | 0 | if (real[0] == '.') { |
733 | 0 | real.insert(0, "0"); |
734 | 0 | } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') { |
735 | | // "-.1" bad "-0.1" good |
736 | 0 | real.insert(1, "0"); |
737 | 0 | } |
738 | 0 | } else if (MathLimits<double>::IsNaN(val)) { |
739 | | // The JSON spec doesn't allow NaN and Infinity (since these are |
740 | | // objects in EcmaScript). Use strings instead. |
741 | 0 | real = "\"NaN\""; |
742 | 0 | } else if (val < 0) { |
743 | 0 | real = "\"-Infinity\""; |
744 | 0 | } else { |
745 | 0 | real = "\"Infinity\""; |
746 | 0 | } |
747 | 0 | SubstituteAndAppend(out, "$0", real); |
748 | 0 | break; |
749 | 0 | } |
750 | 0 | case TRACE_VALUE_TYPE_POINTER: |
751 | | // JSON only supports double and int numbers. |
752 | | // So as not to lose bits from a 64-bit pointer, output as a hex string. |
753 | 0 | StringAppendF(out, "\"0x%" PRIx64 "\"", static_cast<uint64>( |
754 | 0 | reinterpret_cast<intptr_t>( |
755 | 0 | value.as_pointer))); |
756 | 0 | break; |
757 | 1 | case TRACE_VALUE_TYPE_STRING: |
758 | 14 | case TRACE_VALUE_TYPE_COPY_STRING: |
759 | 14 | *out += "\""; |
760 | 14 | JsonEscape(value.as_string ? value.as_string : "NULL", out); |
761 | 14 | *out += "\""; |
762 | 14 | break; |
763 | 0 | default: |
764 | 0 | LOG(FATAL) << "Don't know how to print this value"; |
765 | 0 | break; |
766 | 40.0k | } |
767 | 40.0k | } |
768 | | |
769 | 314k | void TraceEvent::AppendAsJSON(std::string* out) const { |
770 | 314k | int64 time_int64 = timestamp_; |
771 | 314k | int process_id = TraceLog::GetInstance()->process_id(); |
772 | | // Category group checked at category creation time. |
773 | 314k | DCHECK(!strchr(name_, '"')); |
774 | 314k | StringAppendF(out, |
775 | 314k | "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%" PRId64 ",\"ts\":%" PRId64 "," |
776 | 314k | "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{", |
777 | 314k | TraceLog::GetCategoryGroupName(category_group_enabled_), |
778 | 314k | process_id, |
779 | 314k | thread_id_, |
780 | 314k | time_int64, |
781 | 314k | phase_, |
782 | 314k | name_); |
783 | | |
784 | | // Output argument names and values, stop at first NULL argument name. |
785 | 354k | for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) { |
786 | 40.0k | if (i > 0) |
787 | 0 | *out += ","; |
788 | 40.0k | *out += "\""; |
789 | 40.0k | *out += arg_names_[i]; |
790 | 40.0k | *out += "\":"; |
791 | | |
792 | 40.0k | if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE) |
793 | 0 | convertable_values_[i]->AppendAsTraceFormat(out); |
794 | 40.0k | else |
795 | 40.0k | AppendValueAsJSON(arg_types_[i], arg_values_[i], out); |
796 | 40.0k | } |
797 | 314k | *out += "}"; |
798 | | |
799 | 314k | if (phase_ == TRACE_EVENT_PHASE_COMPLETE) { |
800 | 57.9k | int64 duration = duration_; |
801 | 57.9k | if (duration != -1) |
802 | 57.9k | StringAppendF(out, ",\"dur\":%" PRId64, duration); |
803 | 57.9k | if (thread_timestamp_ >= 0) { |
804 | 57.9k | int64 thread_duration = thread_duration_; |
805 | 57.9k | if (thread_duration != -1) |
806 | 57.9k | StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration); |
807 | 57.9k | } |
808 | 57.9k | } |
809 | | |
810 | | // Output tts if thread_timestamp is valid. |
811 | 314k | if (thread_timestamp_ >= 0) { |
812 | 314k | int64 thread_time_int64 = thread_timestamp_; |
813 | 314k | StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64); |
814 | 314k | } |
815 | | |
816 | | // If id_ is set, print it out as a hex string so we don't lose any |
817 | | // bits (it might be a 64-bit pointer). |
818 | 314k | if (flags_ & TRACE_EVENT_FLAG_HAS_ID) |
819 | 0 | StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_)); |
820 | | |
821 | | // Instant events also output their scope. |
822 | 314k | if (phase_ == TRACE_EVENT_PHASE_INSTANT) { |
823 | 255k | char scope = '?'; |
824 | 255k | switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) { |
825 | 255k | case TRACE_EVENT_SCOPE_GLOBAL: |
826 | 255k | scope = TRACE_EVENT_SCOPE_NAME_GLOBAL; |
827 | 255k | break; |
828 | | |
829 | 0 | case TRACE_EVENT_SCOPE_PROCESS: |
830 | 0 | scope = TRACE_EVENT_SCOPE_NAME_PROCESS; |
831 | 0 | break; |
832 | | |
833 | 2 | case TRACE_EVENT_SCOPE_THREAD: |
834 | 2 | scope = TRACE_EVENT_SCOPE_NAME_THREAD; |
835 | 2 | break; |
836 | 255k | } |
837 | 255k | StringAppendF(out, ",\"s\":\"%c\"", scope); |
838 | 255k | } |
839 | | |
840 | 314k | *out += "}"; |
841 | 314k | } |
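For reference, one COMPLETE event serialized by AppendAsJSON() looks roughly like the following; every value is invented, and the field order follows the StringAppendF calls above:

    // Illustrative result (all values invented):
    const char kExampleEvent[] =
        "{\"cat\":\"toplevel\",\"pid\":1234,\"tid\":7,\"ts\":1700000000123,"
        "\"ph\":\"X\",\"name\":\"SomeEvent\",\"args\":{\"count\":42},"
        "\"dur\":15,\"tdur\":12,\"tts\":990}";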
842 | | |
843 | 1 | void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const { |
844 | 1 | *out << name_ << "["; |
845 | 1 | *out << TraceLog::GetCategoryGroupName(category_group_enabled_); |
846 | 1 | *out << "]"; |
847 | 1 | if (arg_names_[0]) { |
848 | 1 | *out << ", {"; |
849 | 2 | for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) { |
850 | 1 | if (i > 0) |
851 | 0 | *out << ", "; |
852 | 1 | *out << arg_names_[i] << ":"; |
853 | 1 | std::string value_as_text; |
854 | | |
855 | 1 | if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE) |
856 | 0 | convertable_values_[i]->AppendAsTraceFormat(&value_as_text); |
857 | 1 | else |
858 | 1 | AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text); |
859 | | |
860 | 1 | *out << value_as_text; |
861 | 1 | } |
862 | 1 | *out << "}"; |
863 | 1 | } |
864 | 1 | } |
865 | | |
866 | | //////////////////////////////////////////////////////////////////////////////// |
867 | | // |
868 | | // TraceResultBuffer |
869 | | // |
870 | | //////////////////////////////////////////////////////////////////////////////// |
871 | | |
872 | 23 | string TraceResultBuffer::FlushTraceLogToString() { |
873 | 23 | return DoFlush(false); |
874 | 23 | } |
875 | | |
876 | 0 | string TraceResultBuffer::FlushTraceLogToStringButLeaveBufferIntact() { |
877 | 0 | return DoFlush(true); |
878 | 0 | } |
879 | | |
880 | 23 | string TraceResultBuffer::DoFlush(bool leave_intact) { |
881 | 23 | TraceResultBuffer buf; |
882 | 23 | TraceLog* tl = TraceLog::GetInstance(); |
883 | 23 | if (leave_intact) { |
884 | 0 | tl->FlushButLeaveBufferIntact(Bind(&TraceResultBuffer::Collect, Unretained(&buf))); |
885 | 23 | } else { |
886 | 23 | tl->Flush(Bind(&TraceResultBuffer::Collect, Unretained(&buf))); |
887 | 23 | } |
888 | 23 | buf.json_.append("]}\n"); |
889 | 23 | return buf.json_; |
890 | 23 | } |
891 | | |
892 | | TraceResultBuffer::TraceResultBuffer() |
893 | 23 | : first_(true) { |
894 | 23 | } |
895 | 23 | TraceResultBuffer::~TraceResultBuffer() { |
896 | 23 | } |
897 | | |
898 | | void TraceResultBuffer::Collect( |
899 | | const scoped_refptr<RefCountedString>& s, |
900 | 348 | bool has_more_events) { |
901 | 348 | if (first_) { |
902 | 23 | json_.append("{\"traceEvents\": [\n"); |
903 | 23 | first_ = false; |
904 | 325 | } else if (!s->data().empty()) { |
905 | | // Sometimes we get sent an empty chunk at the end, |
906 | | // and we don't want to end up with an extra trailing ','. |
907 | 325 | json_.append(",\n"); |
908 | 325 | } |
909 | 348 | json_.append(s->data()); |
910 | 348 | } |
911 | | |
912 | | //////////////////////////////////////////////////////////////////////////////// |
913 | | // |
914 | | // TraceSamplingThread |
915 | | // |
916 | | //////////////////////////////////////////////////////////////////////////////// |
917 | | class TraceBucketData; |
918 | | typedef Callback<void(TraceBucketData*)> TraceSampleCallback; |
919 | | |
920 | | class TraceBucketData { |
921 | | public: |
922 | | TraceBucketData(AtomicWord* bucket, |
923 | | const char* name, |
924 | | TraceSampleCallback callback); |
925 | | ~TraceBucketData(); |
926 | | |
927 | | TRACE_EVENT_API_ATOMIC_WORD* bucket; |
928 | | const char* bucket_name; |
929 | | TraceSampleCallback callback; |
930 | | }; |
931 | | |
932 | | // This object must be created on the IO thread. |
933 | | class TraceSamplingThread { |
934 | | public: |
935 | | TraceSamplingThread(); |
936 | | virtual ~TraceSamplingThread(); |
937 | | |
938 | | void ThreadMain(); |
939 | | |
940 | | static void DefaultSamplingCallback(TraceBucketData* bucket_data); |
941 | | |
942 | | void Stop(); |
943 | | |
944 | | private: |
945 | | friend class TraceLog; |
946 | | |
947 | | void GetSamples(); |
948 | | // Not thread-safe. Once ThreadMain has been called, this can no longer |
949 | | // be called. |
950 | | void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket, |
951 | | const char* const name, |
952 | | TraceSampleCallback callback); |
953 | | // Splits a combined "category\0name" into the two component parts. |
954 | | static void ExtractCategoryAndName(const char* combined, |
955 | | const char** category, |
956 | | const char** name); |
957 | | std::vector<TraceBucketData> sample_buckets_; |
958 | | bool thread_running_; |
959 | | AtomicBool cancellation_flag_; |
960 | | }; |
961 | | |
962 | | |
963 | | TraceSamplingThread::TraceSamplingThread() |
964 | | : thread_running_(false), |
965 | 1 | cancellation_flag_(false) { |
966 | 1 | } |
967 | | |
968 | 1 | TraceSamplingThread::~TraceSamplingThread() { |
969 | 1 | } |
970 | | |
971 | 1 | void TraceSamplingThread::ThreadMain() { |
972 | 1 | thread_running_ = true; |
973 | 1 | const MonoDelta sleepDelta = MonoDelta::FromMicroseconds(1000); |
974 | 102 | while (!cancellation_flag_.Load()) { |
975 | 101 | SleepFor(sleepDelta); |
976 | 101 | GetSamples(); |
977 | 101 | } |
978 | 1 | } |
979 | | |
980 | | // static |
981 | | void TraceSamplingThread::DefaultSamplingCallback( |
982 | 303 | TraceBucketData* bucket_data) { |
983 | 303 | TRACE_EVENT_API_ATOMIC_WORD category_and_name = |
984 | 303 | TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket); |
985 | 303 | if (!category_and_name) |
986 | 202 | return; |
987 | 101 | const char* const combined = |
988 | 101 | reinterpret_cast<const char* const>(category_and_name); |
989 | 101 | const char* category_group; |
990 | 101 | const char* name; |
991 | 101 | ExtractCategoryAndName(combined, &category_group, &name); |
992 | | |
993 | 101 | TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE, |
994 | 101 | TraceLog::GetCategoryGroupEnabled(category_group), |
995 | 101 | name, 0, 0, nullptr, nullptr, nullptr, nullptr, 0); |
996 | 101 | } |
997 | | |
998 | 101 | void TraceSamplingThread::GetSamples() { |
999 | 303 | for (auto& sample_bucket : sample_buckets_) { |
1000 | 303 | TraceBucketData* bucket_data = &sample_bucket; |
1001 | 303 | bucket_data->callback.Run(bucket_data); |
1002 | 303 | } |
1003 | 101 | } |
1004 | | |
1005 | | void TraceSamplingThread::RegisterSampleBucket( |
1006 | | TRACE_EVENT_API_ATOMIC_WORD* bucket, |
1007 | | const char* const name, |
1008 | 3 | TraceSampleCallback callback) { |
1009 | | // Access to sample_buckets_ doesn't cause races with the sampling thread |
1010 | | // that uses sample_buckets_, because it is guaranteed that |
1011 | | // RegisterSampleBucket is called before the sampling thread is created. |
1012 | 3 | DCHECK(!thread_running_); |
1013 | 3 | sample_buckets_.push_back(TraceBucketData(bucket, name, callback)); |
1014 | 3 | } |
1015 | | |
1016 | | // static |
1017 | | void TraceSamplingThread::ExtractCategoryAndName(const char* combined, |
1018 | | const char** category, |
1019 | 101 | const char** name) { |
1020 | 101 | *category = combined; |
1021 | 101 | *name = &combined[strlen(combined) + 1]; |
1022 | 101 | } |
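The combined buffer is two NUL-terminated strings packed back to back; a hypothetical illustration (ExtractCategoryAndName() is private to TraceSamplingThread, so this is callable like this only from within the class; shown standalone for clarity):

    // Sketch only: the "category\0name" layout ExtractCategoryAndName() expects.
    const char combined[] = "my_category\0my_event";  // array adds a trailing NUL
    const char* category = nullptr;
    const char* name = nullptr;
    ExtractCategoryAndName(combined, &category, &name);
    // category -> "my_category"; name -> "my_event" (one past the first NUL)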
1023 | | |
1024 | 1 | void TraceSamplingThread::Stop() { |
1025 | 1 | cancellation_flag_.Store(true); |
1026 | 1 | } |
1027 | | |
1028 | | TraceBucketData::TraceBucketData(AtomicWord* bucket, const char* name, |
1029 | | TraceSampleCallback callback) |
1030 | 3 | : bucket(bucket), bucket_name(name), callback(std::move(callback)) {} |
1031 | | |
1032 | 9 | TraceBucketData::~TraceBucketData() { |
1033 | 9 | } |
1034 | | |
1035 | | //////////////////////////////////////////////////////////////////////////////// |
1036 | | // |
1037 | | // TraceLog |
1038 | | // |
1039 | | //////////////////////////////////////////////////////////////////////////////// |
1040 | | |
1041 | | class TraceLog::ThreadLocalEventBuffer { |
1042 | | public: |
1043 | | explicit ThreadLocalEventBuffer(TraceLog* trace_log); |
1044 | | virtual ~ThreadLocalEventBuffer(); |
1045 | | |
1046 | | TraceEvent* AddTraceEvent(TraceEventHandle* handle); |
1047 | | |
1048 | 52.0k | TraceEvent* GetEventByHandle(TraceEventHandle handle) { |
1049 | 52.0k | if (!chunk_ || handle.chunk_seq != chunk_->seq() || |
1050 | 51.8k | handle.chunk_index != chunk_index_) |
1051 | 1 | return nullptr; |
1052 | | |
1053 | 52.0k | return chunk_->GetEventAt(handle.event_index); |
1054 | 52.0k | } |
1055 | | |
1056 | 308k | int generation() const { return generation_; } |
1057 | | |
1058 | | void Flush(int64_t tid); |
1059 | | |
1060 | | private: |
1061 | | // Check that the current thread is the one that constructed this trace buffer. |
1062 | 308k | void CheckIsOwnerThread() const { |
1063 | 308k | DCHECK_EQ(yb::Thread::UniqueThreadId(), owner_tid_); |
1064 | 308k | } |
1065 | | |
1066 | | // Since TraceLog is a leaky singleton, trace_log_ will always be valid |
1067 | | // as long as the thread exists. |
1068 | | TraceLog* trace_log_; |
1069 | | std::unique_ptr<TraceBufferChunk> chunk_; |
1070 | | size_t chunk_index_; |
1071 | | int generation_; |
1072 | | |
1073 | | // The TID of the thread that constructed this event buffer. Only this thread |
1074 | | // may add trace events. |
1075 | | int64_t owner_tid_; |
1076 | | |
1077 | | DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer); |
1078 | | }; |
1079 | | |
1080 | | TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log) |
1081 | | : trace_log_(trace_log), |
1082 | | chunk_index_(0), |
1083 | | generation_(trace_log->generation()), |
1084 | 24 | owner_tid_(yb::Thread::UniqueThreadId()) { |
1085 | 24 | } |
1086 | | |
1087 | 23 | TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() { |
1088 | 23 | } |
1089 | | |
1090 | | TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent( |
1091 | 308k | TraceEventHandle* handle) { |
1092 | 308k | CheckIsOwnerThread(); |
1093 | | |
1094 | 308k | if (chunk_ && chunk_->IsFull()) { |
1095 | 4.89k | SpinLockHolder lock(&trace_log_->lock_); |
1096 | 4.89k | Flush(Thread::UniqueThreadId()); |
1097 | 4.89k | chunk_.reset(); |
1098 | 4.89k | } |
1099 | 308k | if (!chunk_) { |
1100 | 4.92k | SpinLockHolder lock(&trace_log_->lock_); |
1101 | 4.92k | chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_); |
1102 | 4.92k | trace_log_->CheckIfBufferIsFullWhileLocked(); |
1103 | 4.92k | } |
1104 | 308k | if (!chunk_) |
1105 | 0 | return nullptr; |
1106 | | |
1107 | 308k | size_t event_index; |
1108 | 308k | TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index); |
1109 | 309k | if (trace_event && handle) |
1110 | 309k | MakeHandle(chunk_->seq(), chunk_index_, event_index, handle); |
1111 | | |
1112 | 308k | return trace_event; |
1113 | 308k | } |
1114 | | |
1115 | 4.92k | void TraceLog::ThreadLocalEventBuffer::Flush(int64_t tid) { |
1116 | 4.92k | DCHECK(trace_log_->lock_.IsHeld()); |
1117 | | |
1118 | 4.92k | if (!chunk_) |
1119 | 4 | return; |
1120 | | |
1121 | 4.92k | if (trace_log_->CheckGeneration(generation_)) { |
1122 | | // Return the chunk to the buffer only if the generation matches. |
1123 | 4.92k | trace_log_->logged_events_->ReturnChunk(chunk_index_, std::move(chunk_)); |
1124 | 4.92k | } |
1125 | 4.92k | } |
1126 | | |
1127 | | // static |
1128 | 1.59M | TraceLog* TraceLog::GetInstance() { |
1129 | 1.59M | return Singleton<TraceLog>::get(); |
1130 | 1.59M | } |
1131 | | |
1132 | | TraceLog::TraceLog() |
1133 | | : mode_(DISABLED), |
1134 | | num_traces_recorded_(0), |
1135 | | event_callback_(0), |
1136 | | dispatching_to_observer_list_(false), |
1137 | | process_sort_index_(0), |
1138 | | process_id_hash_(0), |
1139 | | process_id_(0), |
1140 | | time_offset_(0), |
1141 | | watch_category_(0), |
1142 | | trace_options_(RECORD_UNTIL_FULL), |
1143 | | sampling_thread_handle_(nullptr), |
1144 | | category_filter_(CategoryFilter::kDefaultCategoryFilterString), |
1145 | | event_callback_category_filter_( |
1146 | | CategoryFilter::kDefaultCategoryFilterString), |
1147 | | thread_shared_chunk_index_(0), |
1148 | 16.2k | generation_(0) { |
1149 | | // Trace is enabled or disabled on one thread while other threads are |
1150 | | // accessing the enabled flag. We don't care whether edge-case events are |
1151 | | // traced or not, so we allow races on the enabled flag to keep the trace |
1152 | | // macros fast. |
1153 | 16.2k | ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled, |
1154 | 16.2k | sizeof(g_category_group_enabled), |
1155 | 16.2k | "trace_event category enabled"); |
1156 | 1.63M | for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) { |
1157 | 1.62M | ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i], |
1158 | 1.62M | "trace_event category enabled"); |
1159 | 1.62M | } |
1160 | 16.2k | SetProcessID(static_cast<int>(getpid())); |
1161 | | |
1162 | 16.2k | string filter = FLAGS_trace_to_console; |
1163 | 16.2k | if (!filter.empty()) { |
1164 | 0 | SetEnabled(CategoryFilter(filter), RECORDING_MODE, ECHO_TO_CONSOLE); |
1165 | 0 | LOG(ERROR) << "Tracing to console with CategoryFilter '" << filter << "'."; |
1166 | 0 | } |
1167 | | |
1168 | 16.2k | logged_events_.reset(CreateTraceBuffer()); |
1169 | 16.2k | } |
1170 | | |
1171 | 0 | TraceLog::~TraceLog() { |
1172 | 0 | } |
1173 | | |
1174 | | const unsigned char* TraceLog::GetCategoryGroupEnabled( |
1175 | 667k | const char* category_group) { |
1176 | 667k | TraceLog* tracelog = GetInstance(); |
1177 | 667k | if (!tracelog) { |
1178 | 0 | DCHECK(!g_category_group_enabled[g_category_already_shutdown]); |
1179 | 0 | return &g_category_group_enabled[g_category_already_shutdown]; |
1180 | 0 | } |
1181 | 667k | return tracelog->GetCategoryGroupEnabledInternal(category_group); |
1182 | 667k | } |
1183 | | |
1184 | | const char* TraceLog::GetCategoryGroupName( |
1185 | 314k | const unsigned char* category_group_enabled) { |
1186 | | // Calculate the index of the category group by finding |
1187 | | // category_group_enabled in g_category_group_enabled array. |
1188 | 314k | uintptr_t category_begin = |
1189 | 314k | reinterpret_cast<uintptr_t>(g_category_group_enabled); |
1190 | 314k | uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled); |
1191 | 0 | DCHECK(category_ptr >= category_begin && |
1192 | 0 | category_ptr < reinterpret_cast<uintptr_t>( |
1193 | 0 | g_category_group_enabled + MAX_CATEGORY_GROUPS)) << |
1194 | 0 | "out of bounds category pointer"; |
1195 | 314k | uintptr_t category_index = |
1196 | 314k | (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]); |
1197 | 314k | return g_category_groups[category_index]; |
1198 | 314k | } |
1199 | | |
1200 | 125k | void TraceLog::UpdateCategoryGroupEnabledFlag(AtomicWord category_index) { |
1201 | 125k | unsigned char enabled_flag = 0; |
1202 | 125k | const char* category_group = g_category_groups[category_index]; |
1203 | 125k | if (mode_ == RECORDING_MODE && |
1204 | 148 | category_filter_.IsCategoryGroupEnabled(category_group)) |
1205 | 100 | enabled_flag |= ENABLED_FOR_RECORDING; |
1206 | 124k | else if (mode_ == MONITORING_MODE && |
1207 | 0 | category_filter_.IsCategoryGroupEnabled(category_group)) |
1208 | 0 | enabled_flag |= ENABLED_FOR_MONITORING; |
1209 | 125k | if (event_callback_ && |
1210 | 86 | event_callback_category_filter_.IsCategoryGroupEnabled(category_group)) |
1211 | 38 | enabled_flag |= ENABLED_FOR_EVENT_CALLBACK; |
1212 | 125k | g_category_group_enabled[category_index] = enabled_flag; |
1213 | 125k | } |
1214 | | |
1215 | 50 | void TraceLog::UpdateCategoryGroupEnabledFlags() { |
1216 | 50 | auto category_index = base::subtle::NoBarrier_Load(&g_category_index); |
1217 | 372 | for (AtomicWord i = 0; i < category_index; i++) |
1218 | 322 | UpdateCategoryGroupEnabledFlag(i); |
1219 | 50 | } |
1220 | | |
1221 | 18 | void TraceLog::UpdateSyntheticDelaysFromCategoryFilter() { |
1222 | 18 | ResetTraceEventSyntheticDelays(); |
1223 | 18 | const CategoryFilter::StringList& delays = |
1224 | 18 | category_filter_.GetSyntheticDelayValues(); |
1225 | 18 | CategoryFilter::StringList::const_iterator ci; |
1226 | 18 | for (ci = delays.begin(); ci != delays.end(); ++ci) { |
1227 | 0 | std::list<string> tokens = strings::Split(*ci, ";"); |
1228 | 0 | if (tokens.empty()) continue; |
1229 | | |
1230 | 0 | TraceEventSyntheticDelay* delay = |
1231 | 0 | TraceEventSyntheticDelay::Lookup(tokens.front()); |
1232 | 0 | tokens.pop_front(); |
1233 | 0 | while (!tokens.empty()) { |
1234 | 0 | std::string token = tokens.front(); |
1235 | 0 | tokens.pop_front(); |
1236 | 0 | char* duration_end; |
1237 | 0 | double target_duration = strtod(token.c_str(), &duration_end); |
1238 | 0 | if (duration_end != token.c_str()) { |
1239 | 0 | delay->SetTargetDuration(MonoDelta::FromSeconds(target_duration)); |
1240 | 0 | } else if (token == "static") { |
1241 | 0 | delay->SetMode(TraceEventSyntheticDelay::STATIC); |
1242 | 0 | } else if (token == "oneshot") { |
1243 | 0 | delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT); |
1244 | 0 | } else if (token == "alternating") { |
1245 | 0 | delay->SetMode(TraceEventSyntheticDelay::ALTERNATING); |
1246 | 0 | } |
1247 | 0 | } |
1248 | 0 | } |
1249 | 18 | } |
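Illustrative category-filter entries accepted by the parser above; the delay name is made up and the syntax is inferred from the token handling:

    // Hypothetical synthetic-delay filter entries:
    const char kDelay16ms[]    = "DELAY(test.Delay;0.016)";              // 16 ms target
    const char kDelayAlt16ms[] = "DELAY(test.Delay;0.016;alternating)";  // + ALTERNATING mode
    // Tokens are split on ';': the first names the delay, a numeric token sets
    // the target duration in seconds, and static/oneshot/alternating pick the mode.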
1250 | | |
1251 | | const unsigned char* TraceLog::GetCategoryGroupEnabledInternal( |
1252 | 668k | const char* category_group) { |
1253 | 1.22k | DCHECK(!strchr(category_group, '"')) << |
1254 | 1.22k | "Category groups may not contain double quote"; |
1255 | | // g_category_groups is append-only, so avoid using a lock for the fast path. |
1256 | 668k | auto current_category_index = base::subtle::Acquire_Load(&g_category_index); |
1257 | | |
1258 | | // Search for pre-existing category group. |
1259 | 6.05M | for (AtomicWord i = 0; i < current_category_index; ++i) { |
1260 | 5.92M | if (strcmp(g_category_groups[i], category_group) == 0) { |
1261 | 541k | return &g_category_group_enabled[i]; |
1262 | 541k | } |
1263 | 5.92M | } |
1264 | | |
1265 | 126k | unsigned char* category_group_enabled = nullptr; |
1266 | | // This is the slow path: the lock is not held in the case above, so more |
1267 | | // than one thread could have reached here trying to add the same category. |
1268 | | // Only hold the lock when actually appending a new category, and |
1269 | | // check the category groups again. |
1270 | 126k | SpinLockHolder lock(&lock_); |
1271 | 126k | auto category_index = base::subtle::Acquire_Load(&g_category_index); |
1272 | 1.28M | for (AtomicWord i = 0; i < category_index; ++i) { |
1273 | 1.15M | if (strcmp(g_category_groups[i], category_group) == 0) { |
1274 | 2.73k | return &g_category_group_enabled[i]; |
1275 | 2.73k | } |
1276 | 1.15M | } |
1277 | | |
1278 | | // Create a new category group. |
1279 | 18.4E | DCHECK(category_index < MAX_CATEGORY_GROUPS) << |
1280 | 18.4E | "must increase MAX_CATEGORY_GROUPS"; |
1281 | 124k | if (category_index < MAX_CATEGORY_GROUPS) { |
1282 | | // Don't hold on to the category_group pointer, so that we can create |
1283 | | // category groups with strings not known at compile time (this is |
1284 | | // required by SetWatchEvent). |
1285 | 124k | const char* new_group = strdup(category_group); |
1286 | | // NOTE: new_group is leaked, but this is a small finite amount of data. |
1287 | 124k | g_category_groups[category_index] = new_group; |
1288 | 124k | DCHECK(!g_category_group_enabled[category_index]); |
1289 | | // Note that if both included and excluded patterns in the |
1290 | | // CategoryFilter are empty, we exclude nothing, |
1291 | | // thereby enabling this category group. |
1292 | 124k | UpdateCategoryGroupEnabledFlag(category_index); |
1293 | 124k | category_group_enabled = &g_category_group_enabled[category_index]; |
1294 | | // Update the max index now. |
1295 | 124k | base::subtle::Release_Store(&g_category_index, category_index + 1); |
1296 | 18.4E | } else { |
1297 | 18.4E | category_group_enabled = |
1298 | 18.4E | &g_category_group_enabled[g_category_categories_exhausted]; |
1299 | 18.4E | } |
1300 | 124k | return category_group_enabled; |
1301 | 126k | } |
1302 | | |
1303 | | void TraceLog::GetKnownCategoryGroups( |
1304 | 1 | std::vector<std::string>* category_groups) { |
1305 | 1 | SpinLockHolder lock(&lock_); |
1306 | 1 | auto category_index = base::subtle::NoBarrier_Load(&g_category_index); |
1307 | 11 | for (AtomicWord i = g_num_builtin_categories; i < category_index; i++) |
1308 | 10 | category_groups->push_back(g_category_groups[i]); |
1309 | 1 | } |
1310 | | |
1311 | | void TraceLog::SetEnabled(const CategoryFilter& category_filter, |
1312 | | Mode mode, |
1313 | 18 | Options options) { |
1314 | 18 | std::vector<EnabledStateObserver*> observer_list; |
1315 | 18 | { |
1316 | 18 | SpinLockHolder lock(&lock_); |
1317 | | |
1318 | | // Can't enable tracing when Flush() is in progress. |
1319 | 18 | Options old_options = trace_options(); |
1320 | | |
1321 | 18 | if (IsEnabled()) { |
1322 | 0 | if (options != old_options) { |
1323 | 0 | DLOG(ERROR) << "Attempting to re-enable tracing with a different " |
1324 | 0 | << "set of options."; |
1325 | 0 | } |
1326 | |
1327 | 0 | if (mode != mode_) { |
1328 | 0 | DLOG(ERROR) << "Attempting to re-enable tracing with a different mode."; |
1329 | 0 | } |
1330 | |
1331 | 0 | category_filter_.Merge(category_filter); |
1332 | 0 | UpdateCategoryGroupEnabledFlags(); |
1333 | 0 | return; |
1334 | 0 | } |
1335 | | |
1336 | 18 | if (dispatching_to_observer_list_) { |
1337 | 0 | DLOG(ERROR) << |
1338 | 0 | "Cannot manipulate TraceLog::Enabled state from an observer."; |
1339 | 0 | return; |
1340 | 0 | } |
1341 | | |
1342 | 18 | mode_ = mode; |
1343 | | |
1344 | 18 | if (options != old_options) { |
1345 | 9 | base::subtle::NoBarrier_Store(&trace_options_, options); |
1346 | 9 | UseNextTraceBuffer(); |
1347 | 9 | } |
1348 | | |
1349 | 18 | num_traces_recorded_++; |
1350 | | |
1351 | 18 | category_filter_ = CategoryFilter(category_filter); |
1352 | 18 | UpdateCategoryGroupEnabledFlags(); |
1353 | 18 | UpdateSyntheticDelaysFromCategoryFilter(); |
1354 | | |
1355 | 18 | if (options & ENABLE_SAMPLING) { |
1356 | 1 | sampling_thread_.reset(new TraceSamplingThread); |
1357 | 1 | sampling_thread_->RegisterSampleBucket( |
1358 | 1 | &g_trace_state[0], |
1359 | 1 | "bucket0", |
1360 | 1 | Bind(&TraceSamplingThread::DefaultSamplingCallback)); |
1361 | 1 | sampling_thread_->RegisterSampleBucket( |
1362 | 1 | &g_trace_state[1], |
1363 | 1 | "bucket1", |
1364 | 1 | Bind(&TraceSamplingThread::DefaultSamplingCallback)); |
1365 | 1 | sampling_thread_->RegisterSampleBucket( |
1366 | 1 | &g_trace_state[2], |
1367 | 1 | "bucket2", |
1368 | 1 | Bind(&TraceSamplingThread::DefaultSamplingCallback)); |
1369 | | |
1370 | 1 | Status s = Thread::Create("tracing", "sampler", |
1371 | 1 | &TraceSamplingThread::ThreadMain, |
1372 | 1 | sampling_thread_.get(), |
1373 | 1 | &sampling_thread_handle_); |
1374 | 1 | if (!s.ok()) { |
1375 | 0 | LOG(DFATAL) << "failed to create trace sampling thread: " << s.ToString(); |
1376 | 0 | } |
1377 | 1 | } |
1378 | | |
1379 | 18 | dispatching_to_observer_list_ = true; |
1380 | 18 | observer_list = enabled_state_observer_list_; |
1381 | 18 | } |
1382 | | // Notify observers outside the lock in case they trigger trace events. |
1383 | 18 | for (const auto& observer : observer_list) |
1384 | 0 | observer->OnTraceLogEnabled(); |
1385 | | |
1386 | 18 | { |
1387 | 18 | SpinLockHolder lock(&lock_); |
1388 | 18 | dispatching_to_observer_list_ = false; |
1389 | 18 | } |
1390 | 18 | } |
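// SetEnabled() snapshots the observer list under lock_ and runs the
// callbacks outside it, so an observer may emit trace events (which
// themselves take lock_) without deadlocking. A minimal sketch of that
// copy-then-notify pattern; Observer and NotifyEnabled are illustrative
// names, not this file's API.

#include <mutex>
#include <vector>

struct Observer {
  virtual ~Observer() = default;
  virtual void OnEnabled() = 0;
};

std::mutex g_observer_mu;
std::vector<Observer*> g_observers;

void NotifyEnabled() {
  std::vector<Observer*> snapshot;
  {
    std::lock_guard<std::mutex> l(g_observer_mu);
    snapshot = g_observers;  // Copy while holding the lock.
  }
  // Callbacks run without the lock held; re-entrant calls are safe.
  for (Observer* o : snapshot) {
    o->OnEnabled();
  }
}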
1391 | | |
1392 | 0 | CategoryFilter TraceLog::GetCurrentCategoryFilter() { |
1393 | 0 | SpinLockHolder lock(&lock_); |
1394 | 0 | return category_filter_; |
1395 | 0 | } |
1396 | | |
1397 | 24 | void TraceLog::SetDisabled() { |
1398 | 24 | SpinLockHolder lock(&lock_); |
1399 | 24 | SetDisabledWhileLocked(); |
1400 | 24 | } |
1401 | | |
1402 | 25 | void NO_THREAD_SAFETY_ANALYSIS TraceLog::SetDisabledWhileLocked() { |
1403 | 25 | DCHECK(lock_.IsHeld()); |
1404 | | |
1405 | 25 | if (!IsEnabled()) |
1406 | 7 | return; |
1407 | | |
1408 | 18 | if (dispatching_to_observer_list_) { |
1409 | 0 | DLOG(ERROR) |
1410 | 0 | << "Cannot manipulate TraceLog::Enabled state from an observer."; |
1411 | 0 | return; |
1412 | 0 | } |
1413 | | |
1414 | 18 | mode_ = DISABLED; |
1415 | | |
1416 | 18 | if (sampling_thread_.get()) { |
1417 | | // Stop the sampling thread. |
1418 | 1 | sampling_thread_->Stop(); |
1419 | 1 | lock_.Unlock(); |
1420 | 1 | sampling_thread_handle_->Join(); |
1421 | 1 | lock_.Lock(); |
1422 | 1 | sampling_thread_handle_.reset(); |
1423 | 1 | sampling_thread_.reset(); |
1424 | 1 | } |
1425 | | |
1426 | 18 | category_filter_.Clear(); |
1427 | 18 | base::subtle::NoBarrier_Store(&watch_category_, 0); |
1428 | 18 | watch_event_name_ = ""; |
1429 | 18 | UpdateCategoryGroupEnabledFlags(); |
1430 | 18 | AddMetadataEventsWhileLocked(); |
1431 | | |
1432 | 18 | dispatching_to_observer_list_ = true; |
1433 | 18 | std::vector<EnabledStateObserver*> observer_list = |
1434 | 18 | enabled_state_observer_list_; |
1435 | | |
1436 | 18 | { |
1437 | | // Dispatch to observers outside the lock in case the observer triggers a |
1438 | | // trace event. |
1439 | 18 | lock_.Unlock(); |
1440 | 18 | for (const auto& observer : observer_list) |
1441 | 0 | observer->OnTraceLogDisabled(); |
1442 | 18 | lock_.Lock(); |
1443 | 18 | } |
1444 | 18 | dispatching_to_observer_list_ = false; |
1445 | 18 | } |
1446 | | |
1447 | 0 | int TraceLog::GetNumTracesRecorded() { |
1448 | 0 | SpinLockHolder lock(&lock_); |
1449 | 0 | if (!IsEnabled()) |
1450 | 0 | return -1; |
1451 | 0 | return num_traces_recorded_; |
1452 | 0 | } |
1453 | | |
1454 | 0 | void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) { |
1455 | 0 | enabled_state_observer_list_.push_back(listener); |
1456 | 0 | } |
1457 | | |
1458 | 0 | void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) { |
1459 | 0 | auto it = std::find(enabled_state_observer_list_.begin(), |
1460 | 0 | enabled_state_observer_list_.end(), listener); |
1461 | 0 | if (it != enabled_state_observer_list_.end()) |
1462 | 0 | enabled_state_observer_list_.erase(it); |
1463 | 0 | } |
1464 | | |
1465 | 0 | bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const { |
1466 | 0 | auto it = std::find(enabled_state_observer_list_.begin(), |
1467 | 0 | enabled_state_observer_list_.end(), listener); |
1468 | 0 | return it != enabled_state_observer_list_.end(); |
1469 | 0 | } |
1470 | | |
1471 | 1 | float TraceLog::GetBufferPercentFull() const { |
1472 | 1 | SpinLockHolder lock(&lock_); |
1473 | 1 | return static_cast<float>(static_cast<double>(logged_events_->Size()) / |
1474 | 1 | logged_events_->Capacity()); |
1475 | 1 | } |
1476 | | |
1477 | 255k | bool TraceLog::BufferIsFull() const { |
1478 | 255k | SpinLockHolder lock(&lock_); |
1479 | 255k | return logged_events_->IsFull(); |
1480 | 255k | } |
1481 | | |
1482 | 16.2k | TraceBuffer* TraceLog::CreateTraceBuffer() { |
1483 | 16.2k | Options options = trace_options(); |
1484 | 16.2k | if (options & RECORD_CONTINUOUSLY) |
1485 | 17 | return new TraceBufferRingBuffer(kTraceEventRingBufferChunks); |
1486 | 16.2k | else if ((options & ENABLE_SAMPLING) && mode_ == MONITORING_MODE) |
1487 | 0 | return new TraceBufferRingBuffer(kMonitorTraceEventBufferChunks); |
1488 | 16.2k | else if (options & ECHO_TO_CONSOLE) |
1489 | 1 | return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks); |
1490 | 16.2k | return new TraceBufferVector(); |
1491 | 16.2k | } |
1492 | | |
1493 | | TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked( |
1494 | 28 | TraceEventHandle* handle, bool check_buffer_is_full) { |
1495 | 28 | DCHECK(lock_.IsHeld()); |
1496 | | |
1497 | 28 | if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) { |
1498 | 0 | logged_events_->ReturnChunk(thread_shared_chunk_index_, |
1499 | 0 | std::move(thread_shared_chunk_)); |
1500 | 0 | } |
1501 | | |
1502 | 28 | if (!thread_shared_chunk_) { |
1503 | 18 | thread_shared_chunk_ = logged_events_->GetChunk( |
1504 | 18 | &thread_shared_chunk_index_); |
1505 | 18 | if (check_buffer_is_full) |
1506 | 0 | CheckIfBufferIsFullWhileLocked(); |
1507 | 18 | } |
1508 | 28 | if (!thread_shared_chunk_) |
1509 | 0 | return nullptr; |
1510 | | |
1511 | 28 | size_t event_index; |
1512 | 28 | TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index); |
1513 | 28 | if (trace_event && handle) { |
1514 | 0 | MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_, |
1515 | 0 | event_index, handle); |
1516 | 0 | } |
1517 | 28 | return trace_event; |
1518 | 28 | } |
1519 | | |
1520 | 4.92k | void TraceLog::CheckIfBufferIsFullWhileLocked() { |
1521 | 4.92k | DCHECK(lock_.IsHeld()); |
1522 | 4.92k | if (logged_events_->IsFull()) |
1523 | 1 | SetDisabledWhileLocked(); |
1524 | 4.92k | } |
1525 | | |
1526 | | void TraceLog::SetEventCallbackEnabled(const CategoryFilter& category_filter, |
1527 | 7 | EventCallback cb) { |
1528 | 7 | SpinLockHolder lock(&lock_); |
1529 | 7 | base::subtle::NoBarrier_Store(&event_callback_, |
1530 | 7 | reinterpret_cast<AtomicWord>(cb)); |
1531 | 7 | event_callback_category_filter_ = category_filter; |
1532 | 7 | UpdateCategoryGroupEnabledFlags(); |
1533 | 7 | } |
1534 | | |
1535 | 7 | void TraceLog::SetEventCallbackDisabled() { |
1536 | 7 | SpinLockHolder lock(&lock_); |
1537 | 7 | base::subtle::NoBarrier_Store(&event_callback_, 0); |
1538 | 7 | UpdateCategoryGroupEnabledFlags(); |
1539 | 7 | } |
1540 | | |
1541 | | // Flush() works as the following: |
1542 | | // |
1543 | | // We ensure by taking the global lock that we have exactly one Flusher thread |
1544 | | // (the caller of this function) and some number of "target" threads. We do |
1545 | | // not want to block the target threads, since they are running application code, |
1546 | | // so this implementation takes an approach based on asymmetric synchronization. |
1547 | | // |
1548 | | // For each active thread, we grab its PerThreadInfo object, which may contain |
1549 | | // a pointer to its active trace chunk. We use an AtomicExchange to swap this |
1550 | | // out for a null pointer. This ensures that, on the *next* TRACE call made by |
1551 | | // that thread, it will see a NULL buffer and create a _new_ trace buffer. That |
1552 | | // new buffer will be assigned the generation of the next collection, so we don't
1553 | | // have to worry about it in the current Flush().
1554 | | // |
1555 | | // However, the swap doesn't ensure that the thread doesn't already have a local copy of |
1556 | | // the 'event_buffer_' that we are trying to flush. So, if the thread is in the |
1557 | | // middle of a Trace call, we have to wait until it exits. We do that by spinning |
1558 | | // on the 'is_in_trace_event_' member of that thread's thread-local structure. |
1559 | | // |
1560 | | // After we've swapped the buffer pointer and waited on the thread to exit any |
1561 | | // concurrent Trace() call, we know that no other thread can hold a pointer to |
1562 | | // the trace buffer, and we can safely flush it and delete it. |
1563 | 23 | void TraceLog::Flush(const TraceLog::OutputCallback& cb) { |
1564 | 23 | if (IsEnabled()) { |
1565 | | // Can't flush when tracing is enabled because otherwise PostTask would |
1566 | | // - generate more trace events; |
1567 | | // - deschedule the calling thread on some platforms causing inaccurate |
1568 | | // timing of the trace events. |
1569 | 0 | scoped_refptr<RefCountedString> empty_result = new RefCountedString; |
1570 | 0 | if (!cb.is_null()) |
1571 | 0 | cb.Run(empty_result, false); |
1572 | 0 | LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled"; |
1573 | 0 | return; |
1574 | 0 | } |
1575 | | |
1576 | 23 | int generation = this->generation(); |
1577 | 23 | { |
1578 | | // Holding the active threads lock ensures that no thread will exit and |
1579 | | // delete its own PerThreadInfo object. |
1580 | 23 | MutexLock l(active_threads_lock_); |
1581 | 18 | for (const ActiveThreadMap::value_type& entry : active_threads_) { |
1582 | 18 | int64_t tid = entry.first; |
1583 | 18 | PerThreadInfo* thr_info = entry.second; |
1584 | | |
1585 | | // Swap out their buffer from their thread-local data. |
1586 | | // After this, any _future_ trace calls on that thread will create a new buffer |
1587 | | // and not use the one we obtain here. |
1588 | 18 | ThreadLocalEventBuffer* buf = thr_info->AtomicTakeBuffer(); |
1589 | | |
1590 | | // If this thread hasn't traced anything since our last |
1591 | | // flush, we can skip it. |
1592 | 18 | if (!buf) { |
1593 | 2 | continue; |
1594 | 2 | } |
1595 | | |
1596 | | // The buffer may still be in use by that thread if it's inside a Trace() call. Spin
1597 | | // until it exits, so we can flush and delete its old buffer.
1598 | | // |
1599 | | // It's important that we do not hold 'lock_' here, because otherwise we can get a |
1600 | | // deadlock: a thread may be in the middle of a trace event (is_in_trace_event_ == |
1601 | | // true) and waiting to take lock_, while we are holding the lock and waiting for it |
1602 | | // to not be in the trace event. |
1603 | 16 | while (base::subtle::Acquire_Load(&thr_info->is_in_trace_event_)) { |
1604 | 0 | sched_yield(); |
1605 | 0 | } |
1606 | | |
1607 | 16 | { |
1608 | 16 | SpinLockHolder lock(&lock_); |
1609 | 16 | buf->Flush(tid); |
1610 | 16 | } |
1611 | 16 | delete buf; |
1612 | 16 | } |
1613 | 23 | } |
1614 | | |
1615 | 23 | { |
1616 | 23 | SpinLockHolder lock(&lock_); |
1617 | | |
1618 | 23 | if (thread_shared_chunk_) { |
1619 | 17 | logged_events_->ReturnChunk(thread_shared_chunk_index_, |
1620 | 17 | std::move(thread_shared_chunk_)); |
1621 | 17 | } |
1622 | 23 | } |
1623 | | |
1624 | 23 | FinishFlush(generation, cb); |
1625 | 23 | } |
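// A standalone sketch of the swap-then-wait handshake described above;
// PerThread and TakeForFlush are illustrative stand-ins for this file's
// PerThreadInfo and AtomicTakeBuffer.

#include <atomic>
#include <sched.h>

struct Buffer {};  // Stands in for ThreadLocalEventBuffer.

struct PerThread {
  std::atomic<Buffer*> buffer{nullptr};
  std::atomic<int> in_trace_event{0};
};

Buffer* TakeForFlush(PerThread* t) {
  // Detach the buffer: the thread's next trace call sees nullptr and
  // allocates a fresh buffer belonging to the next generation.
  Buffer* buf = t->buffer.exchange(nullptr, std::memory_order_acq_rel);
  if (buf == nullptr) {
    return nullptr;  // Thread hasn't traced since the last flush.
  }
  // The thread may still hold a local copy of the old pointer while inside
  // a trace call; spin until it exits before touching the buffer.
  while (t->in_trace_event.load(std::memory_order_acquire)) {
    sched_yield();
  }
  return buf;  // Now exclusively owned: safe to flush and delete.
}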
1626 | | |
1627 | | void TraceLog::ConvertTraceEventsToTraceFormat( |
1628 | | std::unique_ptr<TraceBuffer> logged_events, |
1629 | 23 | const TraceLog::OutputCallback& flush_output_callback) { |
1630 | | |
1631 | 23 | if (flush_output_callback.is_null()) |
1632 | 0 | return; |
1633 | | |
1634 | | // The callback needs to be called at least once, even if there are no events,
1635 | | // to let the caller know that the flush has completed.
1636 | 23 | bool has_more_events = true; |
1637 | 348 | do { |
1638 | 348 | scoped_refptr<RefCountedString> json_events_str_ptr = |
1639 | 348 | new RefCountedString(); |
1640 | | |
1641 | 5.28k | for (size_t i = 0; i < kTraceEventBatchChunks; ++i) { |
1642 | 4.96k | const TraceBufferChunk* chunk = logged_events->NextChunk(); |
1643 | 4.96k | if (!chunk) { |
1644 | 23 | has_more_events = false; |
1645 | 23 | break; |
1646 | 23 | } |
1647 | 318k | for (size_t j = 0; j < chunk->size(); ++j) { |
1648 | 314k | if (i > 0 || j > 0) |
1649 | 313k | json_events_str_ptr->data().append(","); |
1650 | 314k | chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data())); |
1651 | 314k | } |
1652 | 4.93k | } |
1653 | | |
1654 | 348 | flush_output_callback.Run(json_events_str_ptr, has_more_events); |
1655 | 348 | } while (has_more_events); |
1656 | 23 | } |
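// The flush_output_callback above receives comma-joined JSON fragments in
// batches, with has_more_events set until the final batch. A sketch of a
// typical consumer that assembles a complete JSON array; JsonSink is a
// hypothetical name, not an API of this file.

#include <string>

struct JsonSink {
  std::string out = "[";
  bool wrote_any = false;

  // Mirrors the callback contract: invoked at least once, the final time
  // with has_more_events == false. Batches are not comma-terminated, so a
  // joining comma is added between them here.
  void Append(const std::string& fragment, bool has_more_events) {
    if (!fragment.empty()) {
      if (wrote_any) out += ",";
      out += fragment;
      wrote_any = true;
    }
    if (!has_more_events) out += "]";
  }
};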
1657 | | |
1658 | | void TraceLog::FinishFlush(int generation, |
1659 | 23 | const TraceLog::OutputCallback& flush_output_callback) { |
1660 | 23 | std::unique_ptr<TraceBuffer> previous_logged_events; |
1661 | | |
1662 | 23 | if (!CheckGeneration(generation)) |
1663 | 0 | return; |
1664 | | |
1665 | 23 | { |
1666 | 23 | SpinLockHolder lock(&lock_); |
1667 | | |
1668 | 23 | previous_logged_events.swap(logged_events_); |
1669 | 23 | UseNextTraceBuffer(); |
1670 | 23 | } |
1671 | | |
1672 | 23 | ConvertTraceEventsToTraceFormat(std::move(previous_logged_events), flush_output_callback); |
1673 | 23 | } |
1674 | | |
1675 | | void TraceLog::FlushButLeaveBufferIntact( |
1676 | 0 | const TraceLog::OutputCallback& flush_output_callback) { |
1677 | 0 | std::unique_ptr<TraceBuffer> previous_logged_events; |
1678 | 0 | { |
1679 | 0 | SpinLockHolder lock(&lock_); |
1680 | 0 | if (mode_ == DISABLED || (trace_options_ & RECORD_CONTINUOUSLY) == 0) { |
1681 | 0 | scoped_refptr<RefCountedString> empty_result = new RefCountedString; |
1682 | 0 | flush_output_callback.Run(empty_result, false); |
1683 | 0 | LOG(WARNING) << "Ignored TraceLog::FlushButLeaveBufferIntact when monitoring is not enabled"; |
1684 | 0 | return; |
1685 | 0 | } |
1686 | | |
1687 | 0 | AddMetadataEventsWhileLocked(); |
1688 | 0 | if (thread_shared_chunk_) { |
1689 | | // Return the chunk to the main buffer to flush the sampling data. |
1690 | 0 | logged_events_->ReturnChunk(thread_shared_chunk_index_, std::move(thread_shared_chunk_)); |
1691 | 0 | } |
1692 | 0 | previous_logged_events = logged_events_->CloneForIteration(); |
1693 | 0 | } |
1694 | |
1695 | 0 | ConvertTraceEventsToTraceFormat(std::move(previous_logged_events), flush_output_callback); |
1696 | 0 | } |
1697 | | |
1698 | 32 | void TraceLog::UseNextTraceBuffer() { |
1699 | 32 | logged_events_.reset(CreateTraceBuffer()); |
1700 | 32 | base::subtle::NoBarrier_AtomicIncrement(&generation_, 1); |
1701 | 32 | thread_shared_chunk_.reset(); |
1702 | 32 | thread_shared_chunk_index_ = 0; |
1703 | 32 | } |
1704 | | |
1705 | | TraceEventHandle TraceLog::AddTraceEvent( |
1706 | | char phase, |
1707 | | const unsigned char* category_group_enabled, |
1708 | | const char* name, |
1709 | | uint64_t id, |
1710 | | int num_args, |
1711 | | const char** arg_names, |
1712 | | const unsigned char* arg_types, |
1713 | | const uint64_t* arg_values, |
1714 | | const scoped_refptr<ConvertableToTraceFormat>* convertable_values, |
1715 | 101 | unsigned char flags) { |
1716 | 101 | auto thread_id = Thread::UniqueThreadId(); |
1717 | 101 | MicrosecondsInt64 now = GetMonoTimeMicros(); |
1718 | 101 | return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled, |
1719 | 101 | name, id, thread_id, now, |
1720 | 101 | num_args, arg_names, |
1721 | 101 | arg_types, arg_values, |
1722 | 101 | convertable_values, flags); |
1723 | 101 | } |
1724 | | |
1725 | 18 | TraceLog::PerThreadInfo* TraceLog::SetupThreadLocalBuffer() { |
1726 | 18 | int64_t cur_tid = Thread::UniqueThreadId(); |
1727 | | |
1728 | 18 | auto thr_info = new PerThreadInfo(); |
1729 | 18 | thr_info->event_buffer_ = nullptr; |
1730 | 18 | thr_info->is_in_trace_event_ = 0; |
1731 | 18 | thread_local_info_ = thr_info; |
1732 | | |
1733 | 18 | Thread* t = Thread::current_thread(); |
1734 | 18 | if (t) { |
1735 | 8 | t->CallAtExit(Bind(&TraceLog::ThreadExiting, Unretained(this))); |
1736 | 8 | } |
1737 | | |
1738 | 18 | { |
1739 | 18 | MutexLock lock(active_threads_lock_); |
1740 | 18 | InsertOrDie(&active_threads_, cur_tid, thr_info); |
1741 | 18 | } |
1742 | 18 | return thr_info; |
1743 | 18 | } |
1744 | | |
1745 | 8 | void TraceLog::ThreadExiting() { |
1746 | 8 | PerThreadInfo* thr_info = thread_local_info_; |
1747 | 8 | if (!thr_info) { |
1748 | 0 | return; |
1749 | 0 | } |
1750 | | |
1751 | 8 | int64_t cur_tid = Thread::UniqueThreadId(); |
1752 | | |
1753 | | // Flush our own buffer back to the central event buffer. |
1754 | | // We do the atomic exchange because a flusher thread may |
1755 | | // also be trying to flush us at the same time, and we need to avoid |
1756 | | // conflict. |
1757 | 8 | ThreadLocalEventBuffer* buf = thr_info->AtomicTakeBuffer(); |
1758 | 8 | if (buf) { |
1759 | 7 | SpinLockHolder lock(&lock_); |
1760 | 7 | buf->Flush(Thread::UniqueThreadId()); |
1761 | 7 | } |
1762 | 8 | delete buf; |
1763 | | |
1764 | 8 | { |
1765 | 8 | MutexLock lock(active_threads_lock_); |
1766 | 8 | active_threads_.erase(cur_tid); |
1767 | 8 | } |
1768 | 8 | delete thr_info; |
1769 | 8 | } |
1770 | | |
1771 | | TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp( |
1772 | | char phase, |
1773 | | const unsigned char* category_group_enabled, |
1774 | | const char* name, |
1775 | | uint64_t id, |
1776 | | int64_t thread_id, |
1777 | | const MicrosecondsInt64& timestamp, |
1778 | | int num_args, |
1779 | | const char** arg_names, |
1780 | | const unsigned char* arg_types, |
1781 | | const uint64_t* arg_values, |
1782 | | const scoped_refptr<ConvertableToTraceFormat>* convertable_values, |
1783 | 312k | unsigned char flags) { |
1784 | 312k | TraceEventHandle handle = { 0, 0, 0 }; |
1785 | 312k | if (!*category_group_enabled) |
1786 | 0 | return handle; |
1787 | | |
1788 | 312k | DCHECK(name); |
1789 | | |
1790 | 312k | if (flags & TRACE_EVENT_FLAG_MANGLE_ID) |
1791 | 0 | id ^= process_id_hash_; |
1792 | | |
1793 | 312k | MicrosecondsInt64 now = OffsetTimestamp(timestamp); |
1794 | 312k | MicrosecondsInt64 thread_now = GetThreadCpuTimeMicros(); |
1795 | | |
1796 | 312k | PerThreadInfo* thr_info = thread_local_info_; |
1797 | 312k | if (PREDICT_FALSE(!thr_info)) { |
1798 | 18 | thr_info = SetupThreadLocalBuffer(); |
1799 | 18 | } |
1800 | | |
1801 | | // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when |
1802 | | // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) -> |
1803 | | // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ... |
1804 | 312k | if (base::subtle::NoBarrier_Load(&thr_info->is_in_trace_event_)) |
1805 | 0 | return handle; |
1806 | | |
1807 | 312k | MarkFlagInScope thread_is_in_trace_event(&thr_info->is_in_trace_event_); |
1808 | | |
1809 | 312k | ThreadLocalEventBuffer* thread_local_event_buffer = |
1810 | 312k | reinterpret_cast<ThreadLocalEventBuffer*>( |
1811 | 312k | base::subtle::NoBarrier_Load( |
1812 | 312k | reinterpret_cast<AtomicWord*>(&thr_info->event_buffer_))); |
1813 | | |
1814 | | // If we have an event buffer, but it's a left-over from a previous trace, |
1815 | | // delete it. |
1816 | 312k | if (PREDICT_FALSE(thread_local_event_buffer && |
1817 | 0 | !CheckGeneration(thread_local_event_buffer->generation()))) { |
1818 | | // We might also race against a flusher thread, so we have to atomically |
1819 | | // take the buffer. |
1820 | 0 | thread_local_event_buffer = thr_info->AtomicTakeBuffer(); |
1821 | 0 | delete thread_local_event_buffer; |
1822 | 0 | thread_local_event_buffer = nullptr; |
1823 | 0 | } |
1824 | | |
1825 | | // If there is no current buffer, create one for this event. |
1826 | 312k | if (PREDICT_FALSE(!thread_local_event_buffer)) { |
1827 | 24 | thread_local_event_buffer = new ThreadLocalEventBuffer(this); |
1828 | | |
1829 | 24 | base::subtle::NoBarrier_Store( |
1830 | 24 | reinterpret_cast<AtomicWord*>(&thr_info->event_buffer_), |
1831 | 24 | reinterpret_cast<AtomicWord>(thread_local_event_buffer)); |
1832 | 24 | } |
1833 | | |
1834 | | // Check and update the current thread name only if the event is for the |
1835 | | // current thread to avoid locks in most cases. |
1836 | 312k | if (thread_id == Thread::UniqueThreadId()) { |
1837 | 308k | Thread* yb_thr = Thread::current_thread(); |
1838 | 308k | if (yb_thr) { |
1839 | 52.4k | const char* new_name = yb_thr->name().c_str(); |
1840 | | // Check if the thread name has been set or changed since the previous |
1841 | | // call (if any), but don't bother if the new name is empty. Note this will |
1842 | | // not detect a thread name change within the same char* buffer address: we |
1843 | | // favor common case performance over corner case correctness. |
1844 | 52.4k | if (PREDICT_FALSE(new_name != g_current_thread_name && |
1845 | 8 | new_name && *new_name)) { |
1846 | 8 | g_current_thread_name = new_name; |
1847 | | |
1848 | 8 | SpinLockHolder thread_info_lock(&thread_info_lock_); |
1849 | | |
1850 | 8 | auto existing_name = thread_names_.find(thread_id); |
1851 | 8 | if (existing_name == thread_names_.end()) { |
1852 | | // This is a new thread id, and a new name. |
1853 | 8 | thread_names_[thread_id] = new_name; |
1854 | 0 | } else { |
1855 | | // This is a thread id that we've seen before, but potentially with a |
1856 | | // new name. |
1857 | 0 | std::vector<GStringPiece> existing_names = strings::Split(existing_name->second, ","); |
1858 | 0 | bool found = std::find(existing_names.begin(), |
1859 | 0 | existing_names.end(), |
1860 | 0 | new_name) != existing_names.end(); |
1861 | 0 | if (!found) { |
1862 | 0 | if (existing_names.size()) |
1863 | 0 | existing_name->second.push_back(','); |
1864 | 0 | existing_name->second.append(new_name); |
1865 | 0 | } |
1866 | 0 | } |
1867 | 8 | } |
1868 | 52.4k | } |
1869 | 308k | } |
1870 | | |
1871 | 312k | std::string console_message; |
1872 | 312k | if (*category_group_enabled & |
1873 | 308k | (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) { |
1874 | 308k | TraceEvent* trace_event = thread_local_event_buffer->AddTraceEvent(&handle); |
1875 | | |
1876 | 310k | if (trace_event) { |
1877 | 310k | trace_event->Initialize(thread_id, now, thread_now, phase, |
1878 | 310k | category_group_enabled, name, id, |
1879 | 310k | num_args, arg_names, arg_types, arg_values, |
1880 | 310k | convertable_values, flags); |
1881 | | |
1882 | | #if defined(OS_ANDROID) |
1883 | | trace_event->SendToATrace(); |
1884 | | #endif |
1885 | 310k | } |
1886 | | |
1887 | 308k | if (trace_options() & ECHO_TO_CONSOLE) { |
1888 | 1 | console_message = EventToConsoleMessage( |
1889 | 1 | phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase, |
1890 | 1 | timestamp, trace_event); |
1891 | 1 | } |
1892 | 308k | } |
1893 | | |
1894 | 312k | if (PREDICT_FALSE(console_message.size())) |
1895 | 1 | LOG(ERROR) << console_message; |
1896 | | |
1897 | 312k | if (PREDICT_FALSE(reinterpret_cast<const unsigned char*>( |
1898 | 0 | base::subtle::NoBarrier_Load(&watch_category_)) == category_group_enabled)) { |
1899 | 0 | bool event_name_matches; |
1900 | 0 | WatchEventCallback watch_event_callback_copy; |
1901 | 0 | { |
1902 | 0 | SpinLockHolder lock(&lock_); |
1903 | 0 | event_name_matches = watch_event_name_ == name; |
1904 | 0 | watch_event_callback_copy = watch_event_callback_; |
1905 | 0 | } |
1906 | 0 | if (event_name_matches) { |
1907 | 0 | if (!watch_event_callback_copy.is_null()) |
1908 | 0 | watch_event_callback_copy.Run(); |
1909 | 0 | } |
1910 | 0 | } |
1911 | | |
1912 | 312k | if (PREDICT_FALSE(*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK)) { |
1913 | 16 | EventCallback event_callback = reinterpret_cast<EventCallback>( |
1914 | 16 | base::subtle::NoBarrier_Load(&event_callback_)); |
1915 | 16 | if (event_callback) { |
1916 | 16 | event_callback(now, |
1917 | 16 | phase == TRACE_EVENT_PHASE_COMPLETE ? |
1918 | 12 | TRACE_EVENT_PHASE_BEGIN : phase, |
1919 | 16 | category_group_enabled, name, id, |
1920 | 16 | num_args, arg_names, arg_types, arg_values, |
1921 | 16 | flags); |
1922 | 16 | } |
1923 | 16 | } |
1924 | | |
1925 | 312k | return handle; |
1926 | 312k | } |
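// A standalone sketch of the scoped re-entrancy guard used above, which
// stops an event from recursively generating more events; ScopedFlag is an
// illustrative stand-in for MarkFlagInScope.

#include <atomic>

class ScopedFlag {
 public:
  explicit ScopedFlag(std::atomic<int>* flag) : flag_(flag) {
    flag_->store(1, std::memory_order_relaxed);
  }
  ~ScopedFlag() { flag_->store(0, std::memory_order_relaxed); }

 private:
  std::atomic<int>* flag_;
};

thread_local std::atomic<int> tls_in_trace_event{0};

void AddEventSketch() {
  if (tls_in_trace_event.load(std::memory_order_relaxed)) {
    return;  // Re-entrant call: drop the nested event.
  }
  ScopedFlag guard(&tls_in_trace_event);
  // ... record the event; any logging it triggers returns early above.
}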
1927 | | |
1928 | | // May be called when a COMPLETE event ends and the unfinished event has been |
1929 | | // recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL). |
1930 | | std::string TraceLog::EventToConsoleMessage(unsigned char phase, |
1931 | | const MicrosecondsInt64& timestamp, |
1932 | 1 | TraceEvent* trace_event) { |
1933 | 1 | SpinLockHolder thread_info_lock(&thread_info_lock_); |
1934 | | |
1935 | | // The caller should translate TRACE_EVENT_PHASE_COMPLETE to |
1936 | | // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
1937 | 1 | DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE); |
1938 | | |
1939 | 1 | MicrosecondsInt64 duration = 0; |
1940 | 1 | auto thread_id = trace_event ? trace_event->thread_id() : Thread::UniqueThreadId(); |
1941 | 1 | if (phase == TRACE_EVENT_PHASE_END) { |
1942 | 0 | duration = timestamp - thread_event_start_times_[thread_id].top(); |
1943 | 0 | thread_event_start_times_[thread_id].pop(); |
1944 | 0 | } |
1945 | | |
1946 | 1 | std::string thread_name = thread_names_[thread_id]; |
1947 | 1 | if (thread_colors_.find(thread_name) == thread_colors_.end()) |
1948 | 1 | thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1; |
1949 | | |
1950 | 1 | std::ostringstream log; |
1951 | 1 | log << StringPrintf("%s: \x1b[0;3%dm", |
1952 | 1 | thread_name.c_str(), |
1953 | 1 | thread_colors_[thread_name]); |
1954 | | |
1955 | 1 | size_t depth = 0; |
1956 | 1 | if (thread_event_start_times_.find(thread_id) != |
1957 | 1 | thread_event_start_times_.end()) |
1958 | 0 | depth = thread_event_start_times_[thread_id].size(); |
1959 | | |
1960 | 1 | for (size_t i = 0; i < depth; ++i) |
1961 | 0 | log << "| "; |
1962 | | |
1963 | 1 | if (trace_event) |
1964 | 1 | trace_event->AppendPrettyPrinted(&log); |
1965 | 1 | if (phase == TRACE_EVENT_PHASE_END) |
1966 | 0 | log << StringPrintf(" (%.3f ms)", duration / 1000.0f); |
1967 | | |
1968 | 1 | log << "\x1b[0;m"; |
1969 | | |
1970 | 1 | if (phase == TRACE_EVENT_PHASE_BEGIN) |
1971 | 0 | thread_event_start_times_[thread_id].push(timestamp); |
1972 | | |
1973 | 1 | return log.str(); |
1974 | 1 | } |
1975 | | |
1976 | | void TraceLog::AddTraceEventEtw(char phase, |
1977 | | const char* name, |
1978 | | const void* id, |
1979 | 0 | const char* extra) { |
1980 | | #if defined(OS_WIN) |
1981 | | TraceEventETWProvider::Trace(name, phase, id, extra); |
1982 | | #endif |
1983 | 0 | INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name, |
1984 | 0 | TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra); |
1985 | 0 | } |
1986 | | |
1987 | | void TraceLog::AddTraceEventEtw(char phase, |
1988 | | const char* name, |
1989 | | const void* id, |
1990 | 0 | const std::string& extra) { |
1991 | | #if defined(OS_WIN) |
1992 | | TraceEventETWProvider::Trace(name, phase, id, extra); |
1993 | | #endif |
1994 | 0 | INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name, |
1995 | 0 | TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra); |
1996 | 0 | } |
1997 | | |
1998 | | void TraceLog::UpdateTraceEventDuration( |
1999 | | const unsigned char* category_group_enabled, |
2000 | | const char* name, |
2001 | 53.6k | TraceEventHandle handle) { |
2002 | | |
2003 | 53.6k | PerThreadInfo* thr_info = thread_local_info_; |
2004 | 53.6k | if (!thr_info) { |
2005 | 0 | thr_info = SetupThreadLocalBuffer(); |
2006 | 0 | } |
2007 | | |
2008 | | // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when |
2009 | | // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) -> |
2010 | | // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ... |
2011 | 53.6k | if (base::subtle::NoBarrier_Load(&thr_info->is_in_trace_event_)) |
2012 | 0 | return; |
2013 | 53.6k | MarkFlagInScope thread_is_in_trace_event(&thr_info->is_in_trace_event_); |
2014 | | |
2015 | 53.6k | MicrosecondsInt64 thread_now = GetThreadCpuTimeMicros(); |
2016 | 53.6k | MicrosecondsInt64 now = OffsetNow(); |
2017 | | |
2018 | 53.6k | std::string console_message; |
2019 | 53.6k | if (*category_group_enabled & ENABLED_FOR_RECORDING) { |
2020 | 51.9k | OptionalAutoLock lock(&lock_); |
2021 | | |
2022 | 51.9k | TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock); |
2023 | 51.9k | if (trace_event) { |
2024 | 51.7k | DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE); |
2025 | 51.7k | trace_event->UpdateDuration(now, thread_now); |
2026 | | #if defined(OS_ANDROID) |
2027 | | trace_event->SendToATrace(); |
2028 | | #endif |
2029 | 51.7k | } |
2030 | | |
2031 | 51.9k | if (trace_options() & ECHO_TO_CONSOLE) { |
2032 | 0 | console_message = EventToConsoleMessage(TRACE_EVENT_PHASE_END, |
2033 | 0 | now, trace_event); |
2034 | 0 | } |
2035 | 51.9k | } |
2036 | | |
2037 | 53.6k | if (console_message.size()) |
2038 | 0 | LOG(ERROR) << console_message; |
2039 | | |
2040 | 53.6k | if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) { |
2041 | 4 | EventCallback event_callback = reinterpret_cast<EventCallback>( |
2042 | 4 | base::subtle::NoBarrier_Load(&event_callback_)); |
2043 | 4 | if (event_callback) { |
2044 | 4 | event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name, |
2045 | 4 | trace_event_internal::kNoEventId, 0, nullptr, nullptr, nullptr, |
2046 | 4 | TRACE_EVENT_FLAG_NONE); |
2047 | 4 | } |
2048 | 4 | } |
2049 | 53.6k | } |
2050 | | |
2051 | | void TraceLog::SetWatchEvent(const std::string& category_name, |
2052 | | const std::string& event_name, |
2053 | 0 | const WatchEventCallback& callback) { |
2054 | 0 | const unsigned char* category = GetCategoryGroupEnabled( |
2055 | 0 | category_name.c_str()); |
2056 | 0 | SpinLockHolder lock(&lock_); |
2057 | 0 | base::subtle::NoBarrier_Store(&watch_category_, |
2058 | 0 | reinterpret_cast<AtomicWord>(category)); |
2059 | 0 | watch_event_name_ = event_name; |
2060 | 0 | watch_event_callback_ = callback; |
2061 | 0 | } |
2062 | | |
2063 | 0 | void TraceLog::CancelWatchEvent() { |
2064 | 0 | SpinLockHolder lock(&lock_); |
2065 | 0 | base::subtle::NoBarrier_Store(&watch_category_, 0); |
2066 | 0 | watch_event_name_ = ""; |
2067 | 0 | watch_event_callback_.Reset(); |
2068 | 0 | } |
2069 | | |
2070 | 18 | void TraceLog::AddMetadataEventsWhileLocked() { |
2071 | 18 | DCHECK(lock_.IsHeld()); |
2072 | | |
2073 | 18 | #if !defined(OS_NACL) // NaCl shouldn't expose the process id. |
2074 | 18 | InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(nullptr, false), |
2075 | 18 | 0, |
2076 | 18 | "num_cpus", "number", |
2077 | 18 | base::NumCPUs()); |
2078 | 18 | #endif |
2079 | | |
2080 | | |
2081 | 18 | int current_thread_id = static_cast<int>(yb::Thread::UniqueThreadId()); |
2082 | 18 | if (process_sort_index_ != 0) { |
2083 | 0 | InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(nullptr, false), |
2084 | 0 | current_thread_id, |
2085 | 0 | "process_sort_index", "sort_index", |
2086 | 0 | process_sort_index_); |
2087 | 0 | } |
2088 | | |
2089 | 18 | if (process_name_.size()) { |
2090 | 0 | InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(nullptr, false), |
2091 | 0 | current_thread_id, |
2092 | 0 | "process_name", "name", |
2093 | 0 | process_name_); |
2094 | 0 | } |
2095 | | |
2096 | 18 | if (process_labels_.size() > 0) { |
2097 | 0 | std::vector<std::string> labels; |
2098 | 0 | for (auto& label : process_labels_) { |
2099 | 0 | labels.push_back(label.second); |
2100 | 0 | } |
2101 | 0 | InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(nullptr, false), |
2102 | 0 | current_thread_id, |
2103 | 0 | "process_labels", "labels", |
2104 | 0 | JoinStrings(labels, ",")); |
2105 | 0 | } |
2106 | | |
2107 | | // Thread sort indices. |
2108 | 0 | for (auto& sort_index : thread_sort_indices_) { |
2109 | 0 | if (sort_index.second == 0) |
2110 | 0 | continue; |
2111 | 0 | InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(nullptr, false), |
2112 | 0 | sort_index.first, |
2113 | 0 | "thread_sort_index", "sort_index", |
2114 | 0 | sort_index.second); |
2115 | 0 | } |
2116 | | |
2117 | | // Thread names. |
2118 | 18 | SpinLockHolder thread_info_lock(&thread_info_lock_); |
2119 | 11 | for (auto& name : thread_names_) { |
2120 | 11 | if (name.second.empty()) |
2121 | 1 | continue; |
2122 | 10 | InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(nullptr, false), |
2123 | 10 | name.first, |
2124 | 10 | "thread_name", "name", |
2125 | 10 | name.second); |
2126 | 10 | } |
2127 | 18 | } |
2128 | | |
2129 | | |
2130 | 0 | TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) { |
2131 | 0 | return GetEventByHandleInternal(handle, nullptr); |
2132 | 0 | } |
2133 | | |
2134 | | TraceEvent* NO_THREAD_SAFETY_ANALYSIS TraceLog::GetEventByHandleInternal( |
2135 | 52.0k | TraceEventHandle handle, OptionalAutoLock* lock) { |
2136 | 52.0k | TraceLog::PerThreadInfo* thr_info = TraceLog::thread_local_info_; |
2137 | | |
2138 | 52.0k | if (!handle.chunk_seq) |
2139 | 0 | return nullptr; |
2140 | | |
2141 | 52.1k | if (thr_info) { |
2142 | 52.1k | ThreadLocalEventBuffer* buf = |
2143 | 52.1k | reinterpret_cast<ThreadLocalEventBuffer*>( |
2144 | 52.1k | base::subtle::NoBarrier_Load( |
2145 | 52.1k | reinterpret_cast<AtomicWord*>(&thr_info->event_buffer_))); |
2146 | | |
2147 | 52.1k | if (buf) { |
2148 | 52.0k | DCHECK_EQ(1, ANNOTATE_UNPROTECTED_READ(thr_info->is_in_trace_event_)); |
2149 | | |
2150 | 52.0k | TraceEvent* trace_event = buf->GetEventByHandle(handle); |
2151 | 52.0k | if (trace_event) |
2152 | 51.8k | return trace_event; |
2153 | 152 | } |
2154 | 52.1k | } |
2155 | | |
2156 | | // The event is no longer under the control of the thread-local buffer.
2157 | | // Try to get the event from the main buffer with a lock. |
2158 | 152 | if (lock) |
2159 | 1 | lock->EnsureAcquired(); |
2160 | | |
2161 | 152 | if (thread_shared_chunk_ && |
2162 | 0 | handle.chunk_index == thread_shared_chunk_index_) { |
2163 | 0 | return handle.chunk_seq == thread_shared_chunk_->seq() ? |
2164 | 0 | thread_shared_chunk_->GetEventAt(handle.event_index) : nullptr; |
2165 | 0 | } |
2166 | | |
2167 | 152 | return logged_events_->GetEventByHandle(handle); |
2168 | 152 | } |
2169 | | |
2170 | 16.2k | void TraceLog::SetProcessID(int process_id) { |
2171 | 16.2k | process_id_ = process_id; |
2172 | | // Create a FNV hash from the process ID for XORing. |
2173 | | // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details. |
2174 | 16.2k | uint64_t offset_basis = 14695981039346656037ull; |
2175 | 16.2k | uint64_t fnv_prime = 1099511628211ull; |
2176 | 16.2k | uint64_t pid = static_cast<uint64_t>(process_id_); |
2177 | 16.2k | process_id_hash_ = (offset_basis ^ pid) * fnv_prime; |
2178 | 16.2k | } |
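// The computation above is one round of FNV-1a (xor, then multiply by the
// 64-bit FNV prime). Since TRACE_EVENT_FLAG_MANGLE_ID applies the hash via
// plain XOR, mangling is self-inverse. A small illustrative check with
// arbitrary values:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t offset_basis = 14695981039346656037ull;
  const uint64_t fnv_prime = 1099511628211ull;
  const uint64_t pid_hash = (offset_basis ^ 1234ull) * fnv_prime;

  const uint64_t id = 0xdeadbeefull;
  const uint64_t mangled = id ^ pid_hash;  // As in AddTraceEvent* above.
  assert((mangled ^ pid_hash) == id);      // XOR-mangling round-trips.
  return 0;
}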
2179 | | |
2180 | 0 | void TraceLog::SetProcessSortIndex(int sort_index) { |
2181 | 0 | SpinLockHolder lock(&lock_); |
2182 | 0 | process_sort_index_ = sort_index; |
2183 | 0 | } |
2184 | | |
2185 | 0 | void TraceLog::SetProcessName(const std::string& process_name) { |
2186 | 0 | SpinLockHolder lock(&lock_); |
2187 | 0 | process_name_ = process_name; |
2188 | 0 | } |
2189 | | |
2190 | | void TraceLog::UpdateProcessLabel( |
2191 | 0 | int label_id, const std::string& current_label) { |
2192 | 0 | if (!current_label.length())
2193 | 0 | return RemoveProcessLabel(label_id); |
2194 | | |
2195 | 0 | SpinLockHolder lock(&lock_); |
2196 | 0 | process_labels_[label_id] = current_label; |
2197 | 0 | } |
2198 | | |
2199 | 0 | void TraceLog::RemoveProcessLabel(int label_id) { |
2200 | 0 | SpinLockHolder lock(&lock_); |
2201 | 0 | auto it = process_labels_.find(label_id); |
2202 | 0 | if (it == process_labels_.end()) |
2203 | 0 | return; |
2204 | | |
2205 | 0 | process_labels_.erase(it); |
2206 | 0 | } |
2207 | | |
2208 | 0 | void TraceLog::SetThreadSortIndex(int64_t thread_id, int sort_index) { |
2209 | 0 | SpinLockHolder lock(&lock_); |
2210 | 0 | thread_sort_indices_[static_cast<int>(thread_id)] = sort_index; |
2211 | 0 | } |
2212 | | |
2213 | 0 | void TraceLog::SetTimeOffset(MicrosecondsInt64 offset) { |
2214 | 0 | time_offset_ = offset; |
2215 | 0 | } |
2216 | | |
2217 | 0 | size_t TraceLog::GetObserverCountForTest() const { |
2218 | 0 | return enabled_state_observer_list_.size(); |
2219 | 0 | } |
2220 | | |
2221 | | bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace( |
2222 | 534 | const std::string& str) { |
2223 | 534 | return str.empty() || |
2224 | 534 | str.at(0) == ' ' || |
2225 | 534 | str.at(str.length() - 1) == ' '; |
2226 | 534 | } |
2227 | | |
2228 | | bool CategoryFilter::DoesCategoryGroupContainCategory( |
2229 | | const char* category_group, |
2230 | 534 | const char* category) const { |
2231 | 534 | DCHECK(category); |
2232 | 534 | vector<string> pieces = strings::Split(category_group, ","); |
2233 | 534 | for (const string& category_group_token : pieces) { |
2234 | | // Don't allow empty tokens, nor tokens with leading or trailing space. |
2235 | 0 | DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace( |
2236 | 0 | category_group_token)) |
2237 | 0 | << "Disallowed category string"; |
2238 | | |
2239 | 534 | if (MatchPattern(category_group_token.c_str(), category)) |
2240 | 72 | return true; |
2241 | 534 | } |
2242 | 462 | return false; |
2243 | 534 | } |
2244 | | |
2245 | 32.4k | CategoryFilter::CategoryFilter(const std::string& filter_string) { |
2246 | 32.4k | if (!filter_string.empty()) |
2247 | 32.4k | Initialize(filter_string); |
2248 | 0 | else |
2249 | 0 | Initialize(CategoryFilter::kDefaultCategoryFilterString); |
2250 | 32.4k | } |
2251 | | |
2252 | | CategoryFilter::CategoryFilter(const CategoryFilter& cf) |
2253 | | : included_(cf.included_), |
2254 | | disabled_(cf.disabled_), |
2255 | | excluded_(cf.excluded_), |
2256 | 18 | delays_(cf.delays_) { |
2257 | 18 | } |
2258 | | |
2259 | 43 | CategoryFilter::~CategoryFilter() { |
2260 | 43 | } |
2261 | | |
2262 | 25 | CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) { |
2263 | 25 | if (this == &rhs) |
2264 | 0 | return *this; |
2265 | | |
2266 | 25 | included_ = rhs.included_; |
2267 | 25 | disabled_ = rhs.disabled_; |
2268 | 25 | excluded_ = rhs.excluded_; |
2269 | 25 | delays_ = rhs.delays_; |
2270 | 25 | return *this; |
2271 | 25 | } |
2272 | | |
2273 | 32.4k | void CategoryFilter::Initialize(const std::string& filter_string) { |
2274 | | // Tokenize list of categories, delimited by ','. |
2275 | 32.4k | vector<string> tokens = strings::Split(filter_string, ","); |
2276 | | // Add each token to the appropriate list (included_, disabled_, excluded_, or delays_).
2277 | 64.9k | for (string category : tokens) { |
2278 | | // Ignore empty categories. |
2279 | 64.9k | if (category.empty()) |
2280 | 0 | continue; |
2281 | | // Synthetic delays are of the form 'DELAY(delay;option;option;...)'. |
2282 | 64.9k | if (category.find(kSyntheticDelayCategoryFilterPrefix) == 0 && |
2283 | 0 | category.at(category.size() - 1) == ')') { |
2284 | 0 | category = category.substr( |
2285 | 0 | strlen(kSyntheticDelayCategoryFilterPrefix), |
2286 | 0 | category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1); |
2287 | 0 | size_t name_length = category.find(';'); |
2288 | 0 | if (name_length != std::string::npos && name_length > 0 && |
2289 | 0 | name_length != category.size() - 1) { |
2290 | 0 | delays_.push_back(category); |
2291 | 0 | } |
2292 | 64.9k | } else if (category.at(0) == '-') { |
2293 | | // Excluded categories start with '-'. |
2294 | | // Remove '-' from category string. |
2295 | 64.8k | category = category.substr(1); |
2296 | 64.8k | excluded_.push_back(category); |
2297 | 14 | } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")), |
2298 | 14 | TRACE_DISABLED_BY_DEFAULT("")) == 0) { |
2299 | 0 | disabled_.push_back(category); |
2300 | 14 | } else { |
2301 | 14 | included_.push_back(category); |
2302 | 14 | } |
2303 | 64.9k | } |
2304 | 32.4k | } |
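// Putting the tokenization above together: one filter string can mix all
// four token kinds. An illustrative example (category names are made up,
// and this assumes kSyntheticDelayCategoryFilterPrefix is "DELAY(", as the
// trailing-')' check suggests):
//
//   CategoryFilter f("rpc,compaction,-raft,"
//                    "disabled-by-default-memory,"
//                    "DELAY(test.Delay;16)");
//
//   included_: {"rpc", "compaction"}
//   excluded_: {"raft"}                        (leading '-' stripped)
//   disabled_: {"disabled-by-default-memory"}
//   delays_:   {"test.Delay;16"}               (DELAY(...) wrapper stripped)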
2305 | | |
2306 | | void CategoryFilter::WriteString(const StringList& values, |
2307 | | std::string* out, |
2308 | 0 | bool included) const { |
2309 | 0 | bool prepend_comma = !out->empty(); |
2310 | 0 | int token_cnt = 0; |
2311 | 0 | for (const auto& value : values) { |
2312 | 0 | if (token_cnt > 0 || prepend_comma) |
2313 | 0 | StringAppendF(out, ","); |
2314 | 0 | StringAppendF(out, "%s%s", (included ? "" : "-"), value.c_str()); |
2315 | 0 | ++token_cnt; |
2316 | 0 | } |
2317 | 0 | } |
2318 | | |
2319 | | void CategoryFilter::WriteString(const StringList& delays, |
2320 | 0 | std::string* out) const { |
2321 | 0 | bool prepend_comma = !out->empty(); |
2322 | 0 | int token_cnt = 0; |
2323 | 0 | for (const auto& delay : delays) { |
2324 | 0 | if (token_cnt > 0 || prepend_comma) |
2325 | 0 | StringAppendF(out, ","); |
2326 | 0 | StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix, |
2327 | 0 | delay.c_str()); |
2328 | 0 | ++token_cnt; |
2329 | 0 | } |
2330 | 0 | } |
2331 | | |
2332 | 0 | std::string CategoryFilter::ToString() const { |
2333 | 0 | std::string filter_string; |
2334 | 0 | WriteString(included_, &filter_string, true); |
2335 | 0 | WriteString(disabled_, &filter_string, true); |
2336 | 0 | WriteString(excluded_, &filter_string, false); |
2337 | 0 | WriteString(delays_, &filter_string); |
2338 | 0 | return filter_string; |
2339 | 0 | } |
2340 | | |
2341 | | bool CategoryFilter::IsCategoryGroupEnabled( |
2342 | 234 | const char* category_group_name) const { |
2343 | | // TraceLog should call this method only as part of enabling/disabling |
2344 | | // categories. |
2345 | 234 | StringList::const_iterator ci; |
2346 | | |
2347 | | // Check the disabled- filters and the disabled-* wildcard first so that a |
2348 | | // "*" filter does not include the disabled. |
2349 | 234 | for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) { |
2350 | 0 | if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str())) |
2351 | 0 | return true; |
2352 | 0 | } |
2353 | 234 | if (DoesCategoryGroupContainCategory(category_group_name, |
2354 | 234 | TRACE_DISABLED_BY_DEFAULT("*"))) |
2355 | 0 | return false; |
2356 | | |
2357 | 330 | for (ci = included_.begin(); ci != included_.end(); ++ci) { |
2358 | 168 | if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str())) |
2359 | 72 | return true; |
2360 | 168 | } |
2361 | | |
2362 | 294 | for (ci = excluded_.begin(); ci != excluded_.end(); ++ci) { |
2363 | 132 | if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str())) |
2364 | 0 | return false; |
2365 | 132 | } |
2366 | | // If the category group is not excluded, and there are no included patterns,
2367 | | // we consider this category group enabled.
2368 | 162 | return included_.empty(); |
2369 | 162 | } |
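// The precedence above (disabled-by-default patterns first, then included,
// then excluded, then the empty-include default), traced through a
// hypothetical filter built from "-raft":
//
//   IsCategoryGroupEnabled("rpc")
//     -> true   (no include patterns, and not excluded)
//   IsCategoryGroupEnabled("raft")
//     -> false  (matches excluded pattern "raft")
//   IsCategoryGroupEnabled("disabled-by-default-gpu")
//     -> false  (caught by the disabled-by-default-* wildcard check)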
2370 | | |
2371 | 0 | bool CategoryFilter::HasIncludedPatterns() const { |
2372 | 0 | return !included_.empty(); |
2373 | 0 | } |
2374 | | |
2375 | 0 | void CategoryFilter::Merge(const CategoryFilter& nested_filter) { |
2376 | | // Keep included patterns only if both filters have an included entry. |
2377 | | // Otherwise, one of the filters was specifying "*" and we want to honour the
2378 | | // broadest filter. |
2379 | 0 | if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) { |
2380 | 0 | included_.insert(included_.end(), |
2381 | 0 | nested_filter.included_.begin(), |
2382 | 0 | nested_filter.included_.end()); |
2383 | 0 | } else { |
2384 | 0 | included_.clear(); |
2385 | 0 | } |
2386 | |
2387 | 0 | disabled_.insert(disabled_.end(), |
2388 | 0 | nested_filter.disabled_.begin(), |
2389 | 0 | nested_filter.disabled_.end()); |
2390 | 0 | excluded_.insert(excluded_.end(), |
2391 | 0 | nested_filter.excluded_.begin(), |
2392 | 0 | nested_filter.excluded_.end()); |
2393 | 0 | delays_.insert(delays_.end(), |
2394 | 0 | nested_filter.delays_.begin(), |
2395 | 0 | nested_filter.delays_.end()); |
2396 | 0 | } |
2397 | | |
2398 | 18 | void CategoryFilter::Clear() { |
2399 | 18 | included_.clear(); |
2400 | 18 | disabled_.clear(); |
2401 | 18 | excluded_.clear(); |
2402 | 18 | } |
2403 | | |
2404 | | const CategoryFilter::StringList& |
2405 | 18 | CategoryFilter::GetSyntheticDelayValues() const { |
2406 | 18 | return delays_; |
2407 | 18 | } |
2408 | | |
2409 | 11.6k | void EnableTraceEvents() { |
2410 | 11.6k | trace_events_enabled.store(true); |
2411 | 11.6k | } |
2412 | | |
2413 | | } // namespace debug |
2414 | | } // namespace yb |
2415 | | |
2416 | | namespace trace_event_internal { |
2417 | | |
2418 | | ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient( |
2419 | 0 | const char* category_group, const char* name) { |
2420 | | // The single atom works because for now the category_group can only be "gpu". |
2421 | 0 | DCHECK_EQ(strcmp(category_group, "gpu"), 0); |
2422 | 0 | static TRACE_EVENT_API_ATOMIC_WORD atomic = 0; |
2423 | 0 | INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( |
2424 | 0 | category_group, atomic, category_group_enabled_); |
2425 | 0 | name_ = name; |
2426 | 0 | if (*category_group_enabled_) { |
2427 | 0 | event_handle_ = |
2428 | 0 | TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP( |
2429 | 0 | TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name, |
2430 | 0 | trace_event_internal::kNoEventId, |
2431 | 0 | static_cast<int>(yb::Thread::UniqueThreadId()), |
2432 | 0 | GetMonoTimeMicros(), |
2433 | 0 | 0, nullptr, nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE); |
2434 | 0 | } |
2435 | 0 | } |
2436 | | |
2437 | 0 | ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { |
2438 | 0 | if (*category_group_enabled_) { |
2439 | 0 | TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, |
2440 | 0 | name_, event_handle_); |
2441 | 0 | } |
2442 | 0 | } |
2443 | | |
2444 | 307k | int TraceEventThreadId() { |
2445 | 307k | return static_cast<int>(yb::Thread::UniqueThreadId()); |
2446 | 307k | } |
2447 | | |
2448 | | } // namespace trace_event_internal |