YugabyteDB (2.13.1.0-b60, 21121d69985fbf76aa6958d8f04a9bfa936293b5)

Coverage Report

Created: 2022-03-22 16:43

/Users/deen/code/yugabyte-db/src/yb/util/debug/trace_event_impl.h
Line
Count
Source (jump to first uncovered line)
1
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
2
// Use of this source code is governed by a BSD-style license that can be
3
// found in the LICENSE file.
4
//
5
// The following only applies to changes made to this file as part of YugaByte development.
6
//
7
// Portions Copyright (c) YugaByte, Inc.
8
//
9
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
10
// in compliance with the License.  You may obtain a copy of the License at
11
//
12
// http://www.apache.org/licenses/LICENSE-2.0
13
//
14
// Unless required by applicable law or agreed to in writing, software distributed under the License
15
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
16
// or implied.  See the License for the specific language governing permissions and limitations
17
// under the License.
18
//
19
20
#ifndef YB_UTIL_DEBUG_TRACE_EVENT_IMPL_H_
21
#define YB_UTIL_DEBUG_TRACE_EVENT_IMPL_H_
22
23
#include <stdint.h>
24
25
#include <cstdint>
26
#include <cstdlib>
27
#include <mutex>
28
#include <stack>
29
#include <string>
30
#include <unordered_map>
31
#include <unordered_set>
32
#include <vector>
33
34
#include <gflags/gflags_declare.h>
35
#include <gtest/gtest_prod.h>
36
37
#include "yb/gutil/atomicops.h"
38
#include "yb/gutil/callback.h"
39
#include "yb/gutil/integral_types.h"
40
#include "yb/gutil/ref_counted.h"
41
#include "yb/gutil/ref_counted_memory.h"
42
#include "yb/gutil/spinlock.h"
43
#include "yb/gutil/walltime.h"
44
45
#include "yb/util/mutex.h"
46
#include "yb/util/shared_lock.h"
47
#include "yb/util/threadlocal.h"
48
49
// Older style trace macros with explicit id and extra data
50
// Only these macros result in publishing data to ETW as currently implemented.
51
#define TRACE_EVENT_BEGIN_ETW(name, id, extra) \
52
    base::debug::TraceLog::AddTraceEventEtw( \
53
        TRACE_EVENT_PHASE_BEGIN, \
54
        name, reinterpret_cast<const void*>(id), extra)
55
56
#define TRACE_EVENT_END_ETW(name, id, extra) \
57
    base::debug::TraceLog::AddTraceEventEtw( \
58
        TRACE_EVENT_PHASE_END, \
59
        name, reinterpret_cast<const void*>(id), extra)
60
61
#define TRACE_EVENT_INSTANT_ETW(name, id, extra) \
62
    base::debug::TraceLog::AddTraceEventEtw( \
63
        TRACE_EVENT_PHASE_INSTANT, \
64
        name, reinterpret_cast<const void*>(id), extra)
65
66
template <typename Type>
67
class Singleton;
68
69
#if defined(COMPILER_GCC)
70
namespace BASE_HASH_NAMESPACE {
71
template <>
72
struct hash<yb::Thread*> {
73
  std::size_t operator()(yb::Thread* value) const {
74
    return reinterpret_cast<std::size_t>(value);
75
  }
76
};
77
}  // BASE_HASH_NAMESPACE
78
#endif
79
80
namespace yb {
81
82
class Thread;
83
84
namespace debug {
85
86
// For any argument of type TRACE_VALUE_TYPE_CONVERTABLE the provided
87
// class must implement this interface.
88
class ConvertableToTraceFormat : public yb::RefCountedThreadSafe<ConvertableToTraceFormat> {
89
 public:
90
  // Append the class info to the provided |out| string. The appended
91
  // data must be a valid JSON object. Strings must be properly quoted, and
92
  // escaped. There is no processing applied to the content after it is
93
  // appended.
94
  virtual void AppendAsTraceFormat(std::string* out) const = 0;
95
96
 protected:
97
0
  virtual ~ConvertableToTraceFormat() {}
98
99
 private:
100
  friend class yb::RefCountedThreadSafe<ConvertableToTraceFormat>;
101
};
102
103
struct TraceEventHandle {
104
  uint32 chunk_seq = 0;
105
  uint16 chunk_index = 0;
106
  uint16 event_index = 0;
107
};
108
109
const int kTraceMaxNumArgs = 2;
110
111
class BASE_EXPORT TraceEvent {
112
 public:
113
  union TraceValue {
114
    bool as_bool;
115
    uint64_t as_uint;
116
    long long as_int;  // NOLINT(runtime/int)
117
    double as_double;
118
    const void* as_pointer;
119
    const char* as_string;
120
  };
121
122
  TraceEvent();
123
  ~TraceEvent();
124
125
  // We don't need to copy TraceEvent except when TraceEventBuffer is cloned.
126
  // Use explicit copy method to avoid accidentally misuse of copy.
127
  void CopyFrom(const TraceEvent& other);
128
129
  void Initialize(
130
      int64_t thread_id,
131
      MicrosecondsInt64 timestamp,
132
      MicrosecondsInt64 thread_timestamp,
133
      char phase,
134
      const unsigned char* category_group_enabled,
135
      const char* name,
136
      uint64_t id,
137
      int num_args,
138
      const char** arg_names,
139
      const unsigned char* arg_types,
140
      const uint64_t* arg_values,
141
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
142
      unsigned char flags);
143
144
  void Reset();
145
146
  void UpdateDuration(const MicrosecondsInt64& now, const MicrosecondsInt64& thread_now);
147
148
  // Serialize event data to JSON
149
  void AppendAsJSON(std::string* out) const;
150
  void AppendPrettyPrinted(std::ostringstream* out) const;
151
152
  static void AppendValueAsJSON(unsigned char type,
153
                                TraceValue value,
154
                                std::string* out);
155
156
0
  MicrosecondsInt64 timestamp() const { return timestamp_; }
157
0
  MicrosecondsInt64 thread_timestamp() const { return thread_timestamp_; }
158
52.1k
  char phase() const { return phase_; }
159
1
  int64_t thread_id() const { return thread_id_; }
160
0
  MicrosecondsInt64 duration() const { return duration_; }
161
0
  MicrosecondsInt64 thread_duration() const { return thread_duration_; }
162
0
  uint64_t id() const { return id_; }
163
0
  unsigned char flags() const { return flags_; }
164
165
  // Exposed for unittesting:
166
167
0
  const yb::RefCountedString* parameter_copy_storage() const {
168
0
    return parameter_copy_storage_.get();
169
0
  }
170
171
0
  const unsigned char* category_group_enabled() const {
172
0
    return category_group_enabled_;
173
0
  }
174
175
0
  const char* name() const { return name_; }
176
177
#if defined(OS_ANDROID)
178
  void SendToATrace();
179
#endif
180
181
 private:
182
  // Note: these are ordered by size (largest first) for optimal packing.
183
  MicrosecondsInt64 timestamp_;
184
  MicrosecondsInt64 thread_timestamp_;
185
  MicrosecondsInt64 duration_;
186
  MicrosecondsInt64 thread_duration_;
187
  // id_ can be used to store phase-specific data.
188
  uint64_t id_;
189
  TraceValue arg_values_[kTraceMaxNumArgs];
190
  const char* arg_names_[kTraceMaxNumArgs];
191
  scoped_refptr<ConvertableToTraceFormat> convertable_values_[kTraceMaxNumArgs];
192
  const unsigned char* category_group_enabled_;
193
  const char* name_;
194
  scoped_refptr<yb::RefCountedString> parameter_copy_storage_;
195
  int64_t thread_id_;
196
  char phase_;
197
  unsigned char flags_;
198
  unsigned char arg_types_[kTraceMaxNumArgs];
199
200
  DISALLOW_COPY_AND_ASSIGN(TraceEvent);
201
};
202
203
// TraceBufferChunk is the basic unit of TraceBuffer.
204
class BASE_EXPORT TraceBufferChunk {
205
 public:
206
  explicit TraceBufferChunk(uint32 seq)
207
      : next_free_(0),
208
4.98k
        seq_(seq) {
209
4.98k
  }
210
211
  void Reset(uint32 new_seq);
212
  TraceEvent* AddTraceEvent(size_t* event_index);
213
617k
  bool IsFull() const { return next_free_ == kTraceBufferChunkSize; }
214
215
363k
  uint32 seq() const { return seq_; }
216
0
  size_t capacity() const { return kTraceBufferChunkSize; }
217
691k
  size_t size() const { return next_free_; }
218
219
53.5k
  TraceEvent* GetEventAt(size_t index) {
220
53.5k
    DCHECK(index < size());
221
53.5k
    return &chunk_[index];
222
53.5k
  }
223
317k
  const TraceEvent* GetEventAt(size_t index) const {
224
317k
    DCHECK(index < size());
225
317k
    return &chunk_[index];
226
317k
  }
227
228
  std::unique_ptr<TraceBufferChunk> Clone() const;
229
230
  static const size_t kTraceBufferChunkSize = 64;
231
232
 private:
233
  size_t next_free_;
234
  TraceEvent chunk_[kTraceBufferChunkSize];
235
  uint32 seq_;
236
};
237
238
// TraceBuffer holds the events as they are collected.
239
class BASE_EXPORT TraceBuffer {
240
 public:
241
32
  virtual ~TraceBuffer() {}
242
243
  virtual std::unique_ptr<TraceBufferChunk> GetChunk(size_t *index) = 0;
244
  virtual void ReturnChunk(size_t index,
245
                           std::unique_ptr<TraceBufferChunk> chunk) = 0;
246
247
  virtual bool IsFull() const = 0;
248
  virtual size_t Size() const = 0;
249
  virtual size_t Capacity() const = 0;
250
  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) = 0;
251
252
  // For iteration. Each TraceBuffer can only be iterated once.
253
  virtual const TraceBufferChunk* NextChunk() = 0;
254
255
  virtual std::unique_ptr<TraceBuffer> CloneForIteration() const = 0;
256
};
257
258
// TraceResultBuffer collects and converts trace fragments returned by TraceLog
259
// to JSON output.
260
class TraceResultBuffer {
261
 public:
262
  static std::string FlushTraceLogToString();
263
  static std::string FlushTraceLogToStringButLeaveBufferIntact();
264
265
 private:
266
  TraceResultBuffer();
267
  ~TraceResultBuffer();
268
269
  static std::string DoFlush(bool leave_intact);
270
271
  // Callback for TraceLog::Flush
272
  void Collect(const scoped_refptr<RefCountedString>& s,
273
               bool has_more_events);
274
275
  bool first_;
276
  std::string json_;
277
};
278
279
class BASE_EXPORT CategoryFilter {
280
 public:
281
  typedef std::vector<std::string> StringList;
282
283
  // The default category filter, used when none is provided.
284
  // Allows all categories through, except if they end in the suffix 'Debug' or
285
  // 'Test'.
286
  static const char* kDefaultCategoryFilterString;
287
288
  // |filter_string| is a comma-delimited list of category wildcards.
289
  // A category can have an optional '-' prefix to make it an excluded category.
290
  // All the same rules apply above, so for example, having both included and
291
  // excluded categories in the same list would not be supported.
292
  //
293
  // Example: CategoryFilter("test_MyTest*");
294
  // Example: CategoryFilter("test_MyTest*,test_OtherStuff");
295
  // Example: CategoryFilter("-excluded_category1,-excluded_category2");
296
  // Example: CategoryFilter("-*,webkit"); would disable everything but webkit.
297
  // Example: CategoryFilter("-webkit"); would enable everything but webkit.
298
  //
299
  // Category filters can also be used to configure synthetic delays.
300
  //
301
  // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16)"); would make swap
302
  //          buffers always take at least 16 ms.
303
  // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16;oneshot)"); would
304
  //          make swap buffers take at least 16 ms the first time it is
305
  //          called.
306
  // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16;alternating)");
307
  //          would make swap buffers take at least 16 ms every other time it
308
  //          is called.
309
  explicit CategoryFilter(const std::string& filter_string);
310
311
  CategoryFilter(const CategoryFilter& cf);
312
313
  ~CategoryFilter();
314
315
  CategoryFilter& operator=(const CategoryFilter& rhs);
316
317
  // Writes the string representation of the CategoryFilter. This is a comma
318
  // separated string, similar in nature to the one used to determine
319
  // enabled/disabled category patterns, except here there is an arbitrary
320
  // order, included categories go first, then excluded categories. Excluded
321
  // categories are distinguished from included categories by the prefix '-'.
322
  std::string ToString() const;
323
324
  // Determines whether category group would be enabled or
325
  // disabled by this category filter.
326
  bool IsCategoryGroupEnabled(const char* category_group) const;
327
328
  // Return a list of the synthetic delays specified in this category filter.
329
  const StringList& GetSyntheticDelayValues() const;
330
331
  // Merges nested_filter with the current CategoryFilter
332
  void Merge(const CategoryFilter& nested_filter);
333
334
  // Clears both included/excluded pattern lists. This would be equivalent to
335
  // creating a CategoryFilter with an empty string, through the constructor.
336
  // i.e: CategoryFilter("").
337
  //
338
  // When using an empty filter, all categories are considered included as we
339
  // are not excluding anything.
340
  void Clear();
341
342
 private:
343
  FRIEND_TEST(TraceEventTestFixture, CategoryFilter);
344
345
  static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(
346
      const std::string& str);
347
348
  void Initialize(const std::string& filter_string);
349
  void WriteString(const StringList& values,
350
                   std::string* out,
351
                   bool included) const;
352
  void WriteString(const StringList& delays, std::string* out) const;
353
  bool HasIncludedPatterns() const;
354
355
  bool DoesCategoryGroupContainCategory(const char* category_group,
356
                                        const char* category) const;
357
358
  StringList included_;
359
  StringList disabled_;
360
  StringList excluded_;
361
  StringList delays_;
362
};
363
364
class TraceSamplingThread;
365
366
class BASE_EXPORT TraceLog {
367
 public:
368
  enum Mode {
369
    DISABLED = 0,
370
    RECORDING_MODE,
371
    MONITORING_MODE,
372
  };
373
374
  // Options determines how the trace buffer stores data.
375
  enum Options {
376
    // Record until the trace buffer is full.
377
    RECORD_UNTIL_FULL = 1 << 0,
378
379
    // Record until the user ends the trace. The trace buffer is a fixed size
380
    // and we use it as a ring buffer during recording.
381
    RECORD_CONTINUOUSLY = 1 << 1,
382
383
    // Enable the sampling profiler in the recording mode.
384
    ENABLE_SAMPLING = 1 << 2,
385
386
    // Echo to console. Events are discarded.
387
    ECHO_TO_CONSOLE = 1 << 3,
388
  };
389
390
  // The pointer returned from GetCategoryGroupEnabledInternal() points to a
391
  // value with zero or more of the following bits. Used in this class only.
392
  // The TRACE_EVENT macros should only use the value as a bool.
393
  // These values must be in sync with macro values in TraceEvent.h in Blink.
394
  enum CategoryGroupEnabledFlags {
395
    // Category group enabled for the recording mode.
396
    ENABLED_FOR_RECORDING = 1 << 0,
397
    // Category group enabled for the monitoring mode.
398
    ENABLED_FOR_MONITORING = 1 << 1,
399
    // Category group enabled by SetEventCallbackEnabled().
400
    ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
401
  };
402
403
  static TraceLog* GetInstance();
404
405
  // Get set of known category groups. This can change as new code paths are
406
  // reached. The known category groups are inserted into |category_groups|.
407
  void GetKnownCategoryGroups(std::vector<std::string>* category_groups);
408
409
  // Retrieves a copy (for thread-safety) of the current CategoryFilter.
410
  CategoryFilter GetCurrentCategoryFilter();
411
412
390k
  Options trace_options() const {
413
390k
    return static_cast<Options>(base::subtle::NoBarrier_Load(&trace_options_));
414
390k
  }
415
416
  // Enables normal tracing (recording trace events in the trace buffer).
417
  // See CategoryFilter comments for details on how to control what categories
418
  // will be traced. If tracing has already been enabled, |category_filter| will
419
  // be merged into the current category filter.
420
  void SetEnabled(const CategoryFilter& category_filter,
421
                  Mode mode, Options options);
422
423
  // Disables normal tracing for all categories.
424
  void SetDisabled();
425
426
66
  bool IsEnabled() { return mode_ != DISABLED; }
427
428
  // The number of times we have begun recording traces. If tracing is off,
429
  // returns -1. If tracing is on, then it returns the number of times we have
430
  // recorded a trace. By watching for this number to increment, you can
431
  // passively discover when a new trace has begun. This is then used to
432
  // implement the TRACE_EVENT_IS_NEW_TRACE() primitive.
433
  int GetNumTracesRecorded();
434
435
#if defined(OS_ANDROID)
436
  void StartATrace();
437
  void StopATrace();
438
  void AddClockSyncMetadataEvent();
439
#endif
440
441
  // Enabled state listeners give a callback when tracing is enabled or
442
  // disabled. This can be used to tie into other libraries' tracing systems
443
  // on-demand.
444
  class EnabledStateObserver {
445
   public:
446
    virtual ~EnabledStateObserver();
447
448
    // Called just after the tracing system becomes enabled, outside of the
449
    // |lock_|. TraceLog::IsEnabled() is true at this point.
450
    virtual void OnTraceLogEnabled() = 0;
451
452
    // Called just after the tracing system disables, outside of the |lock_|.
453
    // TraceLog::IsEnabled() is false at this point.
454
    virtual void OnTraceLogDisabled() = 0;
455
  };
456
  void AddEnabledStateObserver(EnabledStateObserver* listener);
457
  void RemoveEnabledStateObserver(EnabledStateObserver* listener);
458
  bool HasEnabledStateObserver(EnabledStateObserver* listener) const;
459
460
  float GetBufferPercentFull() const;
461
  bool BufferIsFull() const;
462
463
  // Not using yb::Callback because it is limited to 7 parameters.
464
  // Also, using primitive type allows directly passing callback from WebCore.
465
  // WARNING: It is possible for the previously set callback to be called
466
  // after a call to SetEventCallbackEnabled() that replaces or a call to
467
  // SetEventCallbackDisabled() that disables the callback.
468
  // This callback may be invoked on any thread.
469
  // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
470
  // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
471
  // interface simple.
472
  typedef void (*EventCallback)(MicrosecondsInt64 timestamp,
473
                                char phase,
474
                                const unsigned char* category_group_enabled,
475
                                const char* name,
476
                                uint64_t id,
477
                                int num_args,
478
                                const char* const arg_names[],
479
                                const unsigned char arg_types[],
480
                                const uint64_t arg_values[],
481
                                unsigned char flags);
482
483
  // Enable tracing for EventCallback.
484
  void SetEventCallbackEnabled(const CategoryFilter& category_filter,
485
                               EventCallback cb);
486
  void SetEventCallbackDisabled();
487
488
  // Flush all collected events to the given output callback. The callback will
489
  // be called one or more times synchronously from
490
  // the current thread with IPC-bite-size chunks. The string format is
491
  // undefined. Use TraceResultBuffer to convert one or more trace strings to
492
  // JSON. The callback can be null if the caller doesn't want any data.
493
  // Due to the implementation of thread-local buffers, flush can't be
494
  // done when tracing is enabled. If called when tracing is enabled, the
495
  // callback will be called directly with (empty_string, false) to indicate
496
  // the end of this unsuccessful flush.
497
  typedef yb::Callback<void(const scoped_refptr<yb::RefCountedString>&,
498
                              bool has_more_events)> OutputCallback;
499
  void Flush(const OutputCallback& cb);
500
  void FlushButLeaveBufferIntact(const OutputCallback& flush_output_callback);
501
502
  // Called by TRACE_EVENT* macros, don't call this directly.
503
  // The name parameter is a category group for example:
504
  // TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent")
505
  static const unsigned char* GetCategoryGroupEnabled(const char* name);
506
  static const char* GetCategoryGroupName(
507
      const unsigned char* category_group_enabled);
508
509
  // Called by TRACE_EVENT* macros, don't call this directly.
510
  // If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied
511
  // into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above.
512
  TraceEventHandle AddTraceEvent(
513
      char phase,
514
      const unsigned char* category_group_enabled,
515
      const char* name,
516
      uint64_t id,
517
      int num_args,
518
      const char** arg_names,
519
      const unsigned char* arg_types,
520
      const uint64_t* arg_values,
521
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
522
      unsigned char flags);
523
  TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
524
      char phase,
525
      const unsigned char* category_group_enabled,
526
      const char* name,
527
      uint64_t id,
528
      int64_t thread_id,
529
      const MicrosecondsInt64& timestamp,
530
      int num_args,
531
      const char** arg_names,
532
      const unsigned char* arg_types,
533
      const uint64_t* arg_values,
534
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
535
      unsigned char flags);
536
  static void AddTraceEventEtw(char phase,
537
                               const char* category_group,
538
                               const void* id,
539
                               const char* extra);
540
  static void AddTraceEventEtw(char phase,
541
                               const char* category_group,
542
                               const void* id,
543
                               const std::string& extra);
544
545
  void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
546
                                const char* name,
547
                                TraceEventHandle handle);
548
549
  // For every matching event, the callback will be called.
550
  typedef yb::Callback<void()> WatchEventCallback;
551
  void SetWatchEvent(const std::string& category_name,
552
                     const std::string& event_name,
553
                     const WatchEventCallback& callback);
554
  // Cancel the watch event. If tracing is enabled, this may race with the
555
  // watch event notification firing.
556
  void CancelWatchEvent();
557
558
317k
  int process_id() const { return process_id_; }
559
560
  // Allow tests to inspect TraceEvents.
561
0
  size_t GetEventsSize() const { return logged_events_->Size(); }
562
  TraceEvent* GetEventByHandle(TraceEventHandle handle);
563
564
  void SetProcessID(int process_id);
565
566
  // Process sort indices, if set, override the order in which a process will appear
567
  // relative to other processes in the trace viewer. Processes are sorted first
568
  // on their sort index, ascending, then by their name, and then tid.
569
  void SetProcessSortIndex(int sort_index);
570
571
  // Sets the name of the process.
572
  void SetProcessName(const std::string& process_name);
573
574
  // Processes can have labels in addition to their names. Use labels, for
575
  // instance, to list out the web page titles that a process is handling.
576
  void UpdateProcessLabel(int label_id, const std::string& current_label);
577
  void RemoveProcessLabel(int label_id);
578
579
  // Thread sort indices, if set, override the order in which a thread will appear
580
  // within its process in the trace viewer. Threads are sorted first on their
581
  // sort index, ascending, then by their name, and then tid.
582
  void SetThreadSortIndex(int64_t tid , int sort_index);
583
584
  // Allow setting an offset between the current MicrosecondsInt64 time and the time
585
  // that should be reported.
586
  void SetTimeOffset(MicrosecondsInt64 offset);
587
588
  size_t GetObserverCountForTest() const;
589
590
591
 private:
592
  FRIEND_TEST(TraceEventTestFixture,
593
                           TraceBufferRingBufferGetReturnChunk);
594
  FRIEND_TEST(TraceEventTestFixture,
595
                           TraceBufferRingBufferHalfIteration);
596
  FRIEND_TEST(TraceEventTestFixture,
597
                           TraceBufferRingBufferFullIteration);
598
599
  // This allows constructor and destructor to be private and usable only
600
  // by the Singleton class.
601
  friend class Singleton<TraceLog>;
602
603
  // Enable/disable each category group based on the current mode_,
604
  // category_filter_, event_callback_ and event_callback_category_filter_.
605
  // Enable the category group in the enabled mode if category_filter_ matches
606
  // the category group, or event_callback_ is not null and
607
  // event_callback_category_filter_ matches the category group.
608
  void UpdateCategoryGroupEnabledFlags();
609
  void UpdateCategoryGroupEnabledFlag(AtomicWord category_index);
610
611
  // Configure synthetic delays based on the values set in the current
612
  // category filter.
613
  void UpdateSyntheticDelaysFromCategoryFilter();
614
615
  struct PerThreadInfo;
616
  class OptionalAutoLock;
617
  class ThreadLocalEventBuffer;
618
619
  TraceLog();
620
  ~TraceLog();
621
  const unsigned char* GetCategoryGroupEnabledInternal(const char* name);
622
  void AddMetadataEventsWhileLocked();
623
624
0
  TraceBuffer* trace_buffer() const { return logged_events_.get(); }
625
  TraceBuffer* CreateTraceBuffer();
626
627
  std::string EventToConsoleMessage(unsigned char phase,
628
                                    const MicrosecondsInt64& timestamp,
629
                                    TraceEvent* trace_event);
630
631
  TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
632
                                                     bool check_buffer_is_full);
633
  void CheckIfBufferIsFullWhileLocked();
634
  void SetDisabledWhileLocked();
635
636
  TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
637
                                       OptionalAutoLock* lock);
638
639
  void ConvertTraceEventsToTraceFormat(std::unique_ptr<TraceBuffer> logged_events,
640
                                       const OutputCallback& flush_output_callback);
641
  void FinishFlush(int generation,
642
                   const OutputCallback& flush_output_callback);
643
644
  // Called when a thread which has registered trace events is about to exit.
645
  void ThreadExiting();
646
647
313k
  int generation() const {
648
313k
    return static_cast<int>(base::subtle::NoBarrier_Load(&generation_));
649
313k
  }
650
313k
  bool CheckGeneration(int generation) const {
651
313k
    return generation == this->generation();
652
313k
  }
653
  void UseNextTraceBuffer();
654
655
58.7k
  MicrosecondsInt64 OffsetNow() const {
656
58.7k
    return OffsetTimestamp(GetMonoTimeMicros());
657
58.7k
  }
658
362k
  MicrosecondsInt64 OffsetTimestamp(const MicrosecondsInt64& timestamp) const {
659
362k
    return timestamp - time_offset_;
660
362k
  }
661
662
  // Create a new PerThreadInfo object for the current thread,
663
  // and register it in the active_threads_ list.
664
  PerThreadInfo* SetupThreadLocalBuffer();
665
666
  // This lock protects TraceLog member accesses (except for members protected
667
  // by thread_info_lock_) from arbitrary threads.
668
  mutable base::SpinLock lock_;
669
  // This lock protects accesses to thread_names_, thread_event_start_times_
670
  // and thread_colors_.
671
  base::SpinLock thread_info_lock_;
672
  int locked_line_;
673
  Mode mode_;
674
  int num_traces_recorded_;
675
  std::unique_ptr<TraceBuffer> logged_events_;
676
  AtomicWord /* EventCallback */ event_callback_;
677
  bool dispatching_to_observer_list_;
678
  std::vector<EnabledStateObserver*> enabled_state_observer_list_;
679
680
  std::string process_name_;
681
  std::unordered_map<int, std::string> process_labels_;
682
  int process_sort_index_;
683
  std::unordered_map<int, int> thread_sort_indices_;
684
  std::unordered_map<int64_t, std::string> thread_names_;
685
686
  // The following two maps are used only when ECHO_TO_CONSOLE.
687
  std::unordered_map<int64_t, std::stack<MicrosecondsInt64> > thread_event_start_times_;
688
  std::unordered_map<std::string, int> thread_colors_;
689
690
  // XORed with TraceID to make it unlikely to collide with other processes.
691
  uint64_t process_id_hash_;
692
693
  int process_id_;
694
695
  MicrosecondsInt64 time_offset_;
696
697
  // Allow tests to wake up when certain events occur.
698
  WatchEventCallback watch_event_callback_;
699
  AtomicWord /* const unsigned char* */ watch_category_;
700
  std::string watch_event_name_;
701
702
  AtomicWord /* Options */ trace_options_;
703
704
  // Sampling thread handles.
705
  std::unique_ptr<TraceSamplingThread> sampling_thread_;
706
  scoped_refptr<Thread> sampling_thread_handle_;
707
708
  CategoryFilter category_filter_;
709
  CategoryFilter event_callback_category_filter_;
710
711
  struct PerThreadInfo {
712
    ThreadLocalEventBuffer* event_buffer_;
713
    base::subtle::Atomic32 is_in_trace_event_;
714
715
    // Atomically take the event_buffer_ member, setting it to NULL.
716
    // Returns the old value of the member.
717
    ThreadLocalEventBuffer* AtomicTakeBuffer();
718
  };
719
  static __thread PerThreadInfo* thread_local_info_;
720
721
  Mutex active_threads_lock_;
722
  // Map of PID -> PerThreadInfo
723
  // Protected by active_threads_lock_.
724
  typedef std::unordered_map<int64_t, PerThreadInfo*> ActiveThreadMap;
725
  ActiveThreadMap active_threads_;
726
727
  // For events which can't be added into the thread local buffer, e.g. events
728
  // from threads without a message loop.
729
  std::unique_ptr<TraceBufferChunk> thread_shared_chunk_;
730
  size_t thread_shared_chunk_index_;
731
732
  // The generation is incremented whenever tracing is enabled, and incremented
733
  // again when the buffers are flushed. This ensures that trace events logged
734
  // for a previous tracing session do not get accidentally flushed in the
735
  // next tracing session.
736
  AtomicWord generation_;
737
738
  DISALLOW_COPY_AND_ASSIGN(TraceLog);
739
};
740
741
extern std::atomic<bool> trace_events_enabled;
742
743
54.6k
inline bool TraceEventsEnabled() {
744
54.6k
  return trace_events_enabled.load(std::memory_order_relaxed);
745
54.6k
}
746
747
void EnableTraceEvents();
748
749
}  // namespace debug
750
}  // namespace yb
751
752
#endif // YB_UTIL_DEBUG_TRACE_EVENT_IMPL_H_