1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/trace_event/trace_log.h"
6
7 #include <algorithm>
8 #include <cmath>
9 #include <utility>
10
11 #include "base/base_switches.h"
12 #include "base/bind.h"
13 #include "base/command_line.h"
14 #include "base/lazy_instance.h"
15 #include "base/location.h"
16 #include "base/macros.h"
17 #include "base/memory/scoped_ptr.h"
18 #include "base/memory/singleton.h"
19 #include "base/process/process_metrics.h"
20 #include "base/stl_util.h"
21 #include "base/strings/string_split.h"
22 #include "base/strings/string_tokenizer.h"
23 #include "base/strings/stringprintf.h"
24 #include "base/sys_info.h"
25 #include "base/thread_task_runner_handle.h"
26 #include "base/threading/platform_thread.h"
27 #include "base/threading/thread_id_name_manager.h"
28 #include "base/threading/worker_pool.h"
29 #include "base/time/time.h"
30 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
31 #include "base/trace_event/memory_dump_manager.h"
32 #include "base/trace_event/memory_dump_provider.h"
33 #include "base/trace_event/process_memory_dump.h"
34 #include "base/trace_event/trace_buffer.h"
35 #include "base/trace_event/trace_event.h"
36 #include "base/trace_event/trace_event_synthetic_delay.h"
37 #include "base/trace_event/trace_sampling_thread.h"
38 #include "build/build_config.h"
39
40 #if defined(OS_WIN)
41 #include "base/trace_event/trace_event_etw_export_win.h"
42 #endif
43
44 // The thread buckets for the sampling profiler.
45 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
46
47 namespace base {
48 namespace internal {
49
50 class DeleteTraceLogForTesting {
51 public:
52 static void Delete() {
53 Singleton<trace_event::TraceLog,
54 LeakySingletonTraits<trace_event::TraceLog>>::OnExit(0);
55 }
56 };
57
58 } // namespace internal
59
60 namespace trace_event {
61
62 namespace {
63
64 // Controls the number of trace events we will buffer in-memory
65 // before throwing them away.
66 const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
67
68 const size_t kTraceEventVectorBigBufferChunks =
69 512000000 / kTraceBufferChunkSize;
70 static_assert(
71 kTraceEventVectorBigBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
72 "Too many big buffer chunks");
73 const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
74 static_assert(
75 kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
76 "Too many vector buffer chunks");
77 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
78
79 // Can store results for 30 seconds with 1 ms sampling interval.
80 const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
81 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
82 const size_t kEchoToConsoleTraceEventBufferChunks = 256;
83
84 const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
85 const int kThreadFlushTimeoutMs = 3000;
86
87 #define MAX_CATEGORY_GROUPS 100
88
89 // Parallel arrays g_category_groups and g_category_group_enabled are separate
90 // so that a pointer to a member of g_category_group_enabled can be easily
91 // converted to an index into g_category_groups. This allows macros to deal
92 // only with char enabled pointers from g_category_group_enabled, and we can
93 // convert internally to determine the category name from the char enabled
94 // pointer.
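// For example, given a pointer |enabled| somewhere inside
// g_category_group_enabled, the corresponding category name can be recovered
// as g_category_groups[enabled - g_category_group_enabled]; see
// GetCategoryGroupName() below for the checked version of this conversion.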
95 const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
96 "toplevel",
97 "tracing already shutdown",
98 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
99 "__metadata"};
100
101 // The enabled flag is char instead of bool so that the API can be used from C.
102 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0};
103 // Indexes here have to match the g_category_groups array indexes above.
104 const int g_category_already_shutdown = 1;
105 const int g_category_categories_exhausted = 2;
106 const int g_category_metadata = 3;
107 const int g_num_builtin_categories = 4;
108 // Skip default categories.
109 base::subtle::AtomicWord g_category_index = g_num_builtin_categories;
110
111 // The name of the current thread. This is used to decide if the current
112 // thread name has changed. We combine all the seen thread names into the
113 // output name for the thread.
114 LazyInstance<ThreadLocalPointer<const char>>::Leaky g_current_thread_name =
115 LAZY_INSTANCE_INITIALIZER;
116
117 ThreadTicks ThreadNow() {
118 return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks();
119 }
120
121 template <typename T>
122 void InitializeMetadataEvent(TraceEvent* trace_event,
123 int thread_id,
124 const char* metadata_name,
125 const char* arg_name,
126 const T& value) {
127 if (!trace_event)
128 return;
129
130 int num_args = 1;
131 unsigned char arg_type;
132 unsigned long long arg_value;
133 ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
134 trace_event->Initialize(
135 thread_id,
136 TimeTicks(),
137 ThreadTicks(),
138 TRACE_EVENT_PHASE_METADATA,
139 &g_category_group_enabled[g_category_metadata],
140 metadata_name,
141 trace_event_internal::kNoId, // id
142 trace_event_internal::kNoId, // bind_id
143 num_args,
144 &arg_name,
145 &arg_type,
146 &arg_value,
147 nullptr,
148 TRACE_EVENT_FLAG_NONE);
149 }
150
151 class AutoThreadLocalBoolean {
152 public:
153 explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
154 : thread_local_boolean_(thread_local_boolean) {
155 DCHECK(!thread_local_boolean_->Get());
156 thread_local_boolean_->Set(true);
157 }
158 ~AutoThreadLocalBoolean() { thread_local_boolean_->Set(false); }
159
160 private:
161 ThreadLocalBoolean* thread_local_boolean_;
162 DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
163 };
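// Usage sketch, mirroring AddTraceEventWithThreadIdAndTimestamp() below: the
// guard prevents re-entrant tracing on the current thread.
//
//   if (thread_is_in_trace_event_.Get())
//     return handle;
//   AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);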
164
165 // Use this function instead of a TraceEventHandle constructor to keep the
166 // overhead of the ScopedTracer (trace_event.h) constructor to a minimum.
167 void MakeHandle(uint32_t chunk_seq,
168 size_t chunk_index,
169 size_t event_index,
170 TraceEventHandle* handle) {
171 DCHECK(chunk_seq);
172 DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex);
173 DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize);
174 handle->chunk_seq = chunk_seq;
175 handle->chunk_index = static_cast<uint16_t>(chunk_index);
176 handle->event_index = static_cast<uint16_t>(event_index);
177 }
178
179 } // namespace
180
181 // A helper class that allows the lock to be acquired in the middle of the scope
182 // and releases it at the end of the scope if it was acquired.
183 class TraceLog::OptionalAutoLock {
184 public:
185 explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {}
186
187 ~OptionalAutoLock() {
188 if (locked_)
189 lock_->Release();
190 }
191
192 void EnsureAcquired() {
193 if (!locked_) {
194 lock_->Acquire();
195 locked_ = true;
196 }
197 }
198
199 private:
200 Lock* lock_;
201 bool locked_;
202 DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
203 };
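// Usage sketch, as in AddTraceEventWithThreadIdAndTimestamp() below: the lock
// is only taken on the path that falls back to the shared chunk.
//
//   OptionalAutoLock lock(&lock_);
//   TraceEvent* trace_event = NULL;
//   if (thread_local_event_buffer) {
//     trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
//   } else {
//     lock.EnsureAcquired();
//     trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
//   }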
204
205 class TraceLog::ThreadLocalEventBuffer
206 : public MessageLoop::DestructionObserver,
207 public MemoryDumpProvider {
208 public:
209 explicit ThreadLocalEventBuffer(TraceLog* trace_log);
210 ~ThreadLocalEventBuffer() override;
211
212 TraceEvent* AddTraceEvent(TraceEventHandle* handle);
213
214 TraceEvent* GetEventByHandle(TraceEventHandle handle) {
215 if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
216 handle.chunk_index != chunk_index_) {
217 return nullptr;
218 }
219
220 return chunk_->GetEventAt(handle.event_index);
221 }
222
223 int generation() const { return generation_; }
224
225 private:
226 // MessageLoop::DestructionObserver
227 void WillDestroyCurrentMessageLoop() override;
228
229 // MemoryDumpProvider implementation.
230 bool OnMemoryDump(const MemoryDumpArgs& args,
231 ProcessMemoryDump* pmd) override;
232
233 void FlushWhileLocked();
234
235 void CheckThisIsCurrentBuffer() const {
236 DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
237 }
238
239 // Since TraceLog is a leaky singleton, trace_log_ will always be valid
240 // as long as the thread exists.
241 TraceLog* trace_log_;
242 scoped_ptr<TraceBufferChunk> chunk_;
243 size_t chunk_index_;
244 int generation_;
245
246 DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
247 };
248
249 TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
250 : trace_log_(trace_log),
251 chunk_index_(0),
252 generation_(trace_log->generation()) {
253 // ThreadLocalEventBuffer is created only if the thread has a message loop, so
254 // the following message_loop won't be NULL.
255 MessageLoop* message_loop = MessageLoop::current();
256 message_loop->AddDestructionObserver(this);
257
258 // This is to report the local memory usage when memory-infra is enabled.
259 MemoryDumpManager::GetInstance()->RegisterDumpProvider(
260 this, "ThreadLocalEventBuffer", ThreadTaskRunnerHandle::Get());
261
262 AutoLock lock(trace_log->lock_);
263 trace_log->thread_message_loops_.insert(message_loop);
264 }
265
266 TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
267 CheckThisIsCurrentBuffer();
268 MessageLoop::current()->RemoveDestructionObserver(this);
269 MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);
270
271 {
272 AutoLock lock(trace_log_->lock_);
273 FlushWhileLocked();
274 trace_log_->thread_message_loops_.erase(MessageLoop::current());
275 }
276 trace_log_->thread_local_event_buffer_.Set(NULL);
277 }
278
279 TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
280 TraceEventHandle* handle) {
281 CheckThisIsCurrentBuffer();
282
283 if (chunk_ && chunk_->IsFull()) {
284 AutoLock lock(trace_log_->lock_);
285 FlushWhileLocked();
286 chunk_.reset();
287 }
288 if (!chunk_) {
289 AutoLock lock(trace_log_->lock_);
290 chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
291 trace_log_->CheckIfBufferIsFullWhileLocked();
292 }
293 if (!chunk_)
294 return NULL;
295
296 size_t event_index;
297 TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
298 if (trace_event && handle)
299 MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);
300
301 return trace_event;
302 }
303
304 void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
305 delete this;
306 }
307
308 bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(
309 const MemoryDumpArgs& /* args */,
310 ProcessMemoryDump* pmd) {
311 if (!chunk_)
312 return true;
313 std::string dump_base_name = StringPrintf(
314 "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId()));
315 TraceEventMemoryOverhead overhead;
316 chunk_->EstimateTraceMemoryOverhead(&overhead);
317 overhead.DumpInto(dump_base_name.c_str(), pmd);
318 return true;
319 }
320
321 void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
322 if (!chunk_)
323 return;
324
325 trace_log_->lock_.AssertAcquired();
326 if (trace_log_->CheckGeneration(generation_)) {
327 // Return the chunk to the buffer only if the generation matches.
328 trace_log_->logged_events_->ReturnChunk(chunk_index_, std::move(chunk_));
329 }
330 // Otherwise this method may be called from the destructor, or TraceLog will
331 // find the generation mismatch and delete this buffer soon.
332 }
333
334 TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {}
335
336 TraceLogStatus::~TraceLogStatus() {}
337
338 // static
339 TraceLog* TraceLog::GetInstance() {
340 return Singleton<TraceLog, LeakySingletonTraits<TraceLog>>::get();
341 }
342
343 TraceLog::TraceLog()
344 : mode_(DISABLED),
345 num_traces_recorded_(0),
346 event_callback_(0),
347 dispatching_to_observer_list_(false),
348 process_sort_index_(0),
349 process_id_hash_(0),
350 process_id_(0),
351 watch_category_(0),
352 trace_options_(kInternalRecordUntilFull),
353 sampling_thread_handle_(0),
354 trace_config_(TraceConfig()),
355 event_callback_trace_config_(TraceConfig()),
356 thread_shared_chunk_index_(0),
357 generation_(0),
358 use_worker_thread_(false) {
359 // Trace is enabled or disabled on one thread while other threads are
360 // accessing the enabled flag. We don't care whether edge-case events are
361 // traced or not, so we allow races on the enabled flag to keep the trace
362 // macros fast.
363 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
364 // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
365 // sizeof(g_category_group_enabled),
366 // "trace_event category enabled");
367 #if defined(OS_NACL) // NaCl shouldn't expose the process id.
368 SetProcessID(0);
369 #else
370 SetProcessID(static_cast<int>(GetCurrentProcId()));
371 #endif
372
373 logged_events_.reset(CreateTraceBuffer());
374
375 MemoryDumpManager::GetInstance()->RegisterDumpProvider(this, "TraceLog",
376 nullptr);
377 }
378
379 TraceLog::~TraceLog() {}
380
381 void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
382 // A ThreadLocalEventBuffer needs the message loop
383 // - to know when the thread exits;
384 // - to handle the final flush.
385 // For a thread without a message loop, or whose message loop may be blocked,
386 // the trace events will be added into the main buffer directly.
387 if (thread_blocks_message_loop_.Get() || !MessageLoop::current())
388 return;
389 auto thread_local_event_buffer = thread_local_event_buffer_.Get();
390 if (thread_local_event_buffer &&
391 !CheckGeneration(thread_local_event_buffer->generation())) {
392 delete thread_local_event_buffer;
393 thread_local_event_buffer = NULL;
394 }
395 if (!thread_local_event_buffer) {
396 thread_local_event_buffer = new ThreadLocalEventBuffer(this);
397 thread_local_event_buffer_.Set(thread_local_event_buffer);
398 }
399 }
400
401 bool TraceLog::OnMemoryDump(const MemoryDumpArgs& /* args */,
402 ProcessMemoryDump* pmd) {
403 // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
404 // (crbug.com/499731).
405 TraceEventMemoryOverhead overhead;
406 overhead.Add("TraceLog", sizeof(*this));
407 {
408 AutoLock lock(lock_);
409 if (logged_events_)
410 logged_events_->EstimateTraceMemoryOverhead(&overhead);
411
412 for (auto& metadata_event : metadata_events_)
413 metadata_event->EstimateTraceMemoryOverhead(&overhead);
414 }
415 overhead.AddSelf();
416 overhead.DumpInto("tracing/main_trace_log", pmd);
417 return true;
418 }
419
420 const unsigned char* TraceLog::GetCategoryGroupEnabled(
421 const char* category_group) {
422 TraceLog* tracelog = GetInstance();
423 if (!tracelog) {
424 DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
425 return &g_category_group_enabled[g_category_already_shutdown];
426 }
427 return tracelog->GetCategoryGroupEnabledInternal(category_group);
428 }
429
430 const char* TraceLog::GetCategoryGroupName(
431 const unsigned char* category_group_enabled) {
432 // Calculate the index of the category group by finding
433 // category_group_enabled in the g_category_group_enabled array.
434 uintptr_t category_begin =
435 reinterpret_cast<uintptr_t>(g_category_group_enabled);
436 uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
437 DCHECK(category_ptr >= category_begin &&
438 category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled +
439 MAX_CATEGORY_GROUPS))
440 << "out of bounds category pointer";
441 uintptr_t category_index =
442 (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
443 return g_category_groups[category_index];
444 }
445
446 void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
447 unsigned char enabled_flag = 0;
448 const char* category_group = g_category_groups[category_index];
449 if (mode_ == RECORDING_MODE &&
450 trace_config_.IsCategoryGroupEnabled(category_group))
451 enabled_flag |= ENABLED_FOR_RECORDING;
452 else if (mode_ == MONITORING_MODE &&
453 trace_config_.IsCategoryGroupEnabled(category_group))
454 enabled_flag |= ENABLED_FOR_MONITORING;
455 if (event_callback_ &&
456 event_callback_trace_config_.IsCategoryGroupEnabled(category_group))
457 enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
458 #if defined(OS_WIN)
459 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
460 category_group)) {
461 enabled_flag |= ENABLED_FOR_ETW_EXPORT;
462 }
463 #endif
464
465 g_category_group_enabled[category_index] = enabled_flag;
466 }
467
468 void TraceLog::UpdateCategoryGroupEnabledFlags() {
469 size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
470 for (size_t i = 0; i < category_index; i++)
471 UpdateCategoryGroupEnabledFlag(i);
472 }
473
474 void TraceLog::UpdateSyntheticDelaysFromTraceConfig() {
475 ResetTraceEventSyntheticDelays();
476 const TraceConfig::StringList& delays =
477 trace_config_.GetSyntheticDelayValues();
478 TraceConfig::StringList::const_iterator ci;
479 for (ci = delays.begin(); ci != delays.end(); ++ci) {
480 StringTokenizer tokens(*ci, ";");
481 if (!tokens.GetNext())
482 continue;
483 TraceEventSyntheticDelay* delay =
484 TraceEventSyntheticDelay::Lookup(tokens.token());
485 while (tokens.GetNext()) {
486 std::string token = tokens.token();
487 char* duration_end;
488 double target_duration = strtod(token.c_str(), &duration_end);
489 if (duration_end != token.c_str()) {
490 delay->SetTargetDuration(TimeDelta::FromMicroseconds(
491 static_cast<int64_t>(target_duration * 1e6)));
492 } else if (token == "static") {
493 delay->SetMode(TraceEventSyntheticDelay::STATIC);
494 } else if (token == "oneshot") {
495 delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
496 } else if (token == "alternating") {
497 delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
498 }
499 }
500 }
501 }
502
503 const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
504 const char* category_group) {
505 DCHECK(!strchr(category_group, '"'))
506 << "Category groups may not contain double quote";
507 // g_category_groups is append-only, so avoid taking a lock for the fast path.
508 size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);
509
510 // Search for pre-existing category group.
511 for (size_t i = 0; i < current_category_index; ++i) {
512 if (strcmp(g_category_groups[i], category_group) == 0) {
513 return &g_category_group_enabled[i];
514 }
515 }
516
517 unsigned char* category_group_enabled = NULL;
518 // This is the slow path: the lock is not held in the case above, so more
519 // than one thread could have reached here trying to add the same category.
520 // Only hold the lock when actually appending a new category, and
521 // check the category groups again.
522 AutoLock lock(lock_);
523 size_t category_index = base::subtle::Acquire_Load(&g_category_index);
524 for (size_t i = 0; i < category_index; ++i) {
525 if (strcmp(g_category_groups[i], category_group) == 0) {
526 return &g_category_group_enabled[i];
527 }
528 }
529
530 // Create a new category group.
531 DCHECK(category_index < MAX_CATEGORY_GROUPS)
532 << "must increase MAX_CATEGORY_GROUPS";
533 if (category_index < MAX_CATEGORY_GROUPS) {
534 // Don't hold on to the category_group pointer, so that we can create
535 // category groups with strings not known at compile time (this is
536 // required by SetWatchEvent).
537 const char* new_group = strdup(category_group);
538 g_category_groups[category_index] = new_group;
539 DCHECK(!g_category_group_enabled[category_index]);
540 // Note that if both included and excluded patterns in the
541 // TraceConfig are empty, we exclude nothing,
542 // thereby enabling this category group.
543 UpdateCategoryGroupEnabledFlag(category_index);
544 category_group_enabled = &g_category_group_enabled[category_index];
545 // Update the max index now.
546 base::subtle::Release_Store(&g_category_index, category_index + 1);
547 } else {
548 category_group_enabled =
549 &g_category_group_enabled[g_category_categories_exhausted];
550 }
551 return category_group_enabled;
552 }
553
554 void TraceLog::GetKnownCategoryGroups(
555 std::vector<std::string>* category_groups) {
556 AutoLock lock(lock_);
557 size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
558 for (size_t i = g_num_builtin_categories; i < category_index; i++)
559 category_groups->push_back(g_category_groups[i]);
560 }
561
562 void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
563 std::vector<EnabledStateObserver*> observer_list;
564 {
565 AutoLock lock(lock_);
566
567 // Can't enable tracing when Flush() is in progress.
568 DCHECK(!flush_task_runner_);
569
570 InternalTraceOptions new_options =
571 GetInternalOptionsFromTraceConfig(trace_config);
572
573 InternalTraceOptions old_options = trace_options();
574
575 if (IsEnabled()) {
576 if (new_options != old_options) {
577 DLOG(ERROR) << "Attempting to re-enable tracing with a different "
578 << "set of options.";
579 }
580
581 if (mode != mode_) {
582 DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
583 }
584
585 trace_config_.Merge(trace_config);
586 UpdateCategoryGroupEnabledFlags();
587 return;
588 }
589
590 if (dispatching_to_observer_list_) {
591 DLOG(ERROR)
592 << "Cannot manipulate TraceLog::Enabled state from an observer.";
593 return;
594 }
595
596 mode_ = mode;
597
598 if (new_options != old_options) {
599 subtle::NoBarrier_Store(&trace_options_, new_options);
600 UseNextTraceBuffer();
601 }
602
603 num_traces_recorded_++;
604
605 trace_config_ = TraceConfig(trace_config);
606 UpdateCategoryGroupEnabledFlags();
607 UpdateSyntheticDelaysFromTraceConfig();
608
609 if (new_options & kInternalEnableSampling) {
610 sampling_thread_.reset(new TraceSamplingThread);
611 sampling_thread_->RegisterSampleBucket(
612 &g_trace_state[0], "bucket0",
613 Bind(&TraceSamplingThread::DefaultSamplingCallback));
614 sampling_thread_->RegisterSampleBucket(
615 &g_trace_state[1], "bucket1",
616 Bind(&TraceSamplingThread::DefaultSamplingCallback));
617 sampling_thread_->RegisterSampleBucket(
618 &g_trace_state[2], "bucket2",
619 Bind(&TraceSamplingThread::DefaultSamplingCallback));
620 if (!PlatformThread::Create(0, sampling_thread_.get(),
621 &sampling_thread_handle_)) {
622 DCHECK(false) << "failed to create thread";
623 }
624 }
625
626 dispatching_to_observer_list_ = true;
627 observer_list = enabled_state_observer_list_;
628 }
629 // Notify observers outside the lock in case they trigger trace events.
630 for (size_t i = 0; i < observer_list.size(); ++i)
631 observer_list[i]->OnTraceLogEnabled();
632
633 {
634 AutoLock lock(lock_);
635 dispatching_to_observer_list_ = false;
636 }
637 }
638
639 void TraceLog::SetArgumentFilterPredicate(
640 const ArgumentFilterPredicate& argument_filter_predicate) {
641 AutoLock lock(lock_);
642 DCHECK(!argument_filter_predicate.is_null());
643 DCHECK(argument_filter_predicate_.is_null());
644 argument_filter_predicate_ = argument_filter_predicate;
645 }
646
647 TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
648 const TraceConfig& config) {
649 InternalTraceOptions ret =
650 config.IsSamplingEnabled() ? kInternalEnableSampling : kInternalNone;
651 if (config.IsArgumentFilterEnabled())
652 ret |= kInternalEnableArgumentFilter;
653 switch (config.GetTraceRecordMode()) {
654 case RECORD_UNTIL_FULL:
655 return ret | kInternalRecordUntilFull;
656 case RECORD_CONTINUOUSLY:
657 return ret | kInternalRecordContinuously;
658 case ECHO_TO_CONSOLE:
659 return ret | kInternalEchoToConsole;
660 case RECORD_AS_MUCH_AS_POSSIBLE:
661 return ret | kInternalRecordAsMuchAsPossible;
662 }
663 NOTREACHED();
664 return kInternalNone;
665 }
666
667 TraceConfig TraceLog::GetCurrentTraceConfig() const {
668 AutoLock lock(lock_);
669 return trace_config_;
670 }
671
672 void TraceLog::SetDisabled() {
673 AutoLock lock(lock_);
674 SetDisabledWhileLocked();
675 }
676
677 void TraceLog::SetDisabledWhileLocked() {
678 lock_.AssertAcquired();
679
680 if (!IsEnabled())
681 return;
682
683 if (dispatching_to_observer_list_) {
684 DLOG(ERROR)
685 << "Cannot manipulate TraceLog::Enabled state from an observer.";
686 return;
687 }
688
689 mode_ = DISABLED;
690
691 if (sampling_thread_.get()) {
692 // Stop the sampling thread.
693 sampling_thread_->Stop();
694 lock_.Release();
695 PlatformThread::Join(sampling_thread_handle_);
696 lock_.Acquire();
697 sampling_thread_handle_ = PlatformThreadHandle();
698 sampling_thread_.reset();
699 }
700
701 trace_config_.Clear();
702 subtle::NoBarrier_Store(&watch_category_, 0);
703 watch_event_name_ = "";
704 UpdateCategoryGroupEnabledFlags();
705 AddMetadataEventsWhileLocked();
706
707 // Remove metadata events so they will not get added to a subsequent trace.
708 metadata_events_.clear();
709
710 dispatching_to_observer_list_ = true;
711 std::vector<EnabledStateObserver*> observer_list =
712 enabled_state_observer_list_;
713
714 {
715 // Dispatch to observers outside the lock in case the observer triggers a
716 // trace event.
717 AutoUnlock unlock(lock_);
718 for (size_t i = 0; i < observer_list.size(); ++i)
719 observer_list[i]->OnTraceLogDisabled();
720 }
721 dispatching_to_observer_list_ = false;
722 }
723
724 int TraceLog::GetNumTracesRecorded() {
725 AutoLock lock(lock_);
726 if (!IsEnabled())
727 return -1;
728 return num_traces_recorded_;
729 }
730
731 void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
732 AutoLock lock(lock_);
733 enabled_state_observer_list_.push_back(listener);
734 }
735
736 void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
737 AutoLock lock(lock_);
738 std::vector<EnabledStateObserver*>::iterator it =
739 std::find(enabled_state_observer_list_.begin(),
740 enabled_state_observer_list_.end(), listener);
741 if (it != enabled_state_observer_list_.end())
742 enabled_state_observer_list_.erase(it);
743 }
744
745 bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
746 AutoLock lock(lock_);
747 return ContainsValue(enabled_state_observer_list_, listener);
748 }
749
750 TraceLogStatus TraceLog::GetStatus() const {
751 AutoLock lock(lock_);
752 TraceLogStatus result;
753 result.event_capacity = logged_events_->Capacity();
754 result.event_count = logged_events_->Size();
755 return result;
756 }
757
758 bool TraceLog::BufferIsFull() const {
759 AutoLock lock(lock_);
760 return logged_events_->IsFull();
761 }
762
763 TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
764 TraceEventHandle* handle,
765 bool check_buffer_is_full) {
766 lock_.AssertAcquired();
767
768 if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
769 logged_events_->ReturnChunk(thread_shared_chunk_index_,
770 std::move(thread_shared_chunk_));
771 }
772
773 if (!thread_shared_chunk_) {
774 thread_shared_chunk_ =
775 logged_events_->GetChunk(&thread_shared_chunk_index_);
776 if (check_buffer_is_full)
777 CheckIfBufferIsFullWhileLocked();
778 }
779 if (!thread_shared_chunk_)
780 return NULL;
781
782 size_t event_index;
783 TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
784 if (trace_event && handle) {
785 MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
786 event_index, handle);
787 }
788 return trace_event;
789 }
790
791 void TraceLog::CheckIfBufferIsFullWhileLocked() {
792 lock_.AssertAcquired();
793 if (logged_events_->IsFull()) {
794 if (buffer_limit_reached_timestamp_.is_null()) {
795 buffer_limit_reached_timestamp_ = OffsetNow();
796 }
797 SetDisabledWhileLocked();
798 }
799 }
800
801 void TraceLog::SetEventCallbackEnabled(const TraceConfig& trace_config,
802 EventCallback cb) {
803 AutoLock lock(lock_);
804 subtle::NoBarrier_Store(&event_callback_,
805 reinterpret_cast<subtle::AtomicWord>(cb));
806 event_callback_trace_config_ = trace_config;
807 UpdateCategoryGroupEnabledFlags();
808 }
809
810 void TraceLog::SetEventCallbackDisabled() {
811 AutoLock lock(lock_);
812 subtle::NoBarrier_Store(&event_callback_, 0);
813 UpdateCategoryGroupEnabledFlags();
814 }
815
816 // Flush() works as follows:
817 // 1. Flush() is called in thread A whose task runner is saved in
818 // flush_task_runner_;
819 // 2. If thread_message_loops_ is not empty, thread A posts a task to each
820 // message loop to flush the thread local buffers; otherwise finish the flush;
821 // 3. FlushCurrentThread() deletes the thread local event buffer:
822 // - The last batch of events of the thread are flushed into the main buffer;
823 // - The message loop will be removed from thread_message_loops_;
824 // If this is the last message loop, finish the flush;
825 // 4. If any thread hasn't finished its flush in time, finish the flush.
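// A minimal caller-side sketch (OnTraceDataCollected is a hypothetical
// callback; anything matching OutputCallback, i.e. taking the JSON chunk and
// a has_more_events flag, works):
//
//   TraceLog::GetInstance()->SetDisabled();
//   TraceLog::GetInstance()->Flush(Bind(&OnTraceDataCollected),
//                                  false /* use_worker_thread */);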
826 void TraceLog::Flush(const TraceLog::OutputCallback& cb,
827 bool use_worker_thread) {
828 FlushInternal(cb, use_worker_thread, false);
829 }
830
831 void TraceLog::CancelTracing(const OutputCallback& cb) {
832 SetDisabled();
833 FlushInternal(cb, false, true);
834 }
835
836 void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
837 bool use_worker_thread,
838 bool discard_events) {
839 use_worker_thread_ = use_worker_thread;
840 if (IsEnabled()) {
841 // Can't flush when tracing is enabled because otherwise PostTask would
842 // - generate more trace events;
843 // - deschedule the calling thread on some platforms causing inaccurate
844 // timing of the trace events.
845 scoped_refptr<RefCountedString> empty_result = new RefCountedString;
846 if (!cb.is_null())
847 cb.Run(empty_result, false);
848 LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
849 return;
850 }
851
852 int generation = this->generation();
853 // Copy of thread_message_loops_ to be used without locking.
854 std::vector<scoped_refptr<SingleThreadTaskRunner>>
855 thread_message_loop_task_runners;
856 {
857 AutoLock lock(lock_);
858 DCHECK(!flush_task_runner_);
859 flush_task_runner_ = ThreadTaskRunnerHandle::IsSet()
860 ? ThreadTaskRunnerHandle::Get()
861 : nullptr;
862 DCHECK(!thread_message_loops_.size() || flush_task_runner_);
863 flush_output_callback_ = cb;
864
865 if (thread_shared_chunk_) {
866 logged_events_->ReturnChunk(thread_shared_chunk_index_,
867 std::move(thread_shared_chunk_));
868 }
869
870 if (thread_message_loops_.size()) {
871 for (hash_set<MessageLoop*>::const_iterator it =
872 thread_message_loops_.begin();
873 it != thread_message_loops_.end(); ++it) {
874 thread_message_loop_task_runners.push_back((*it)->task_runner());
875 }
876 }
877 }
878
879 if (thread_message_loop_task_runners.size()) {
880 for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) {
881 thread_message_loop_task_runners[i]->PostTask(
882 FROM_HERE, Bind(&TraceLog::FlushCurrentThread, Unretained(this),
883 generation, discard_events));
884 }
885 flush_task_runner_->PostDelayedTask(
886 FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation,
887 discard_events),
888 TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
889 return;
890 }
891
892 FinishFlush(generation, discard_events);
893 }
894
895 // Usually it runs on a different thread.
896 void TraceLog::ConvertTraceEventsToTraceFormat(
897 scoped_ptr<TraceBuffer> logged_events,
898 const OutputCallback& flush_output_callback,
899 const ArgumentFilterPredicate& argument_filter_predicate) {
900 if (flush_output_callback.is_null())
901 return;
902
903 // The callback needs to be called at least once even if there are no events
904 // to let the caller know that the flush has completed.
905 scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
906 while (const TraceBufferChunk* chunk = logged_events->NextChunk()) {
907 for (size_t j = 0; j < chunk->size(); ++j) {
908 size_t size = json_events_str_ptr->size();
909 if (size > kTraceEventBufferSizeInBytes) {
910 flush_output_callback.Run(json_events_str_ptr, true);
911 json_events_str_ptr = new RefCountedString();
912 } else if (size) {
913 json_events_str_ptr->data().append(",\n");
914 }
915 chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()),
916 argument_filter_predicate);
917 }
918 }
919 flush_output_callback.Run(json_events_str_ptr, false);
920 }
921
922 void TraceLog::FinishFlush(int generation, bool discard_events) {
923 scoped_ptr<TraceBuffer> previous_logged_events;
924 OutputCallback flush_output_callback;
925 ArgumentFilterPredicate argument_filter_predicate;
926
927 if (!CheckGeneration(generation))
928 return;
929
930 {
931 AutoLock lock(lock_);
932
933 previous_logged_events.swap(logged_events_);
934 UseNextTraceBuffer();
935 thread_message_loops_.clear();
936
937 flush_task_runner_ = NULL;
938 flush_output_callback = flush_output_callback_;
939 flush_output_callback_.Reset();
940
941 if (trace_options() & kInternalEnableArgumentFilter) {
942 CHECK(!argument_filter_predicate_.is_null());
943 argument_filter_predicate = argument_filter_predicate_;
944 }
945 }
946
947 if (discard_events) {
948 if (!flush_output_callback.is_null()) {
949 scoped_refptr<RefCountedString> empty_result = new RefCountedString;
950 flush_output_callback.Run(empty_result, false);
951 }
952 return;
953 }
954
955 if (use_worker_thread_ &&
956 WorkerPool::PostTask(
957 FROM_HERE, Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
958 Passed(&previous_logged_events),
959 flush_output_callback, argument_filter_predicate),
960 true)) {
961 return;
962 }
963
964 ConvertTraceEventsToTraceFormat(std::move(previous_logged_events),
965 flush_output_callback,
966 argument_filter_predicate);
967 }
968
969 // Run in each thread holding a local event buffer.
970 void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
971 {
972 AutoLock lock(lock_);
973 if (!CheckGeneration(generation) || !flush_task_runner_) {
974 // This is late. The corresponding flush has finished.
975 return;
976 }
977 }
978
979 // This will flush the thread local buffer.
980 delete thread_local_event_buffer_.Get();
981
982 AutoLock lock(lock_);
983 if (!CheckGeneration(generation) || !flush_task_runner_ ||
984 thread_message_loops_.size())
985 return;
986
987 flush_task_runner_->PostTask(
988 FROM_HERE, Bind(&TraceLog::FinishFlush, Unretained(this), generation,
989 discard_events));
990 }
991
992 void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
993 {
994 AutoLock lock(lock_);
995 if (!CheckGeneration(generation) || !flush_task_runner_) {
996 // Flush has finished before timeout.
997 return;
998 }
999
1000 LOG(WARNING)
1001 << "The following threads haven't finished flush in time. "
1002 "If this happens stably for some thread, please call "
1003 "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
1004 "the thread to avoid its trace events from being lost.";
1005 for (hash_set<MessageLoop*>::const_iterator it =
1006 thread_message_loops_.begin();
1007 it != thread_message_loops_.end(); ++it) {
1008 LOG(WARNING) << "Thread: " << (*it)->thread_name();
1009 }
1010 }
1011 FinishFlush(generation, discard_events);
1012 }
1013
1014 void TraceLog::FlushButLeaveBufferIntact(
1015 const TraceLog::OutputCallback& flush_output_callback) {
1016 scoped_ptr<TraceBuffer> previous_logged_events;
1017 ArgumentFilterPredicate argument_filter_predicate;
1018 {
1019 AutoLock lock(lock_);
1020 AddMetadataEventsWhileLocked();
1021 if (thread_shared_chunk_) {
1022 // Return the chunk to the main buffer to flush the sampling data.
1023 logged_events_->ReturnChunk(thread_shared_chunk_index_,
1024 std::move(thread_shared_chunk_));
1025 }
1026 previous_logged_events = logged_events_->CloneForIteration();
1027
1028 if (trace_options() & kInternalEnableArgumentFilter) {
1029 CHECK(!argument_filter_predicate_.is_null());
1030 argument_filter_predicate = argument_filter_predicate_;
1031 }
1032 } // release lock
1033
1034 ConvertTraceEventsToTraceFormat(std::move(previous_logged_events),
1035 flush_output_callback,
1036 argument_filter_predicate);
1037 }
1038
1039 void TraceLog::UseNextTraceBuffer() {
1040 logged_events_.reset(CreateTraceBuffer());
1041 subtle::NoBarrier_AtomicIncrement(&generation_, 1);
1042 thread_shared_chunk_.reset();
1043 thread_shared_chunk_index_ = 0;
1044 }
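// Note: bumping |generation_| invalidates all outstanding
// ThreadLocalEventBuffers; their FlushWhileLocked() sees the generation
// mismatch and drops the stale chunk instead of returning it to the new
// buffer (see ThreadLocalEventBuffer::FlushWhileLocked() above).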
1045
1046 TraceEventHandle TraceLog::AddTraceEvent(
1047 char phase,
1048 const unsigned char* category_group_enabled,
1049 const char* name,
1050 unsigned long long id,
1051 int num_args,
1052 const char** arg_names,
1053 const unsigned char* arg_types,
1054 const unsigned long long* arg_values,
1055 const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
1056 unsigned int flags) {
1057 int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
1058 base::TimeTicks now = base::TimeTicks::Now();
1059 return AddTraceEventWithThreadIdAndTimestamp(
1060 phase,
1061 category_group_enabled,
1062 name,
1063 id,
1064 trace_event_internal::kNoId, // bind_id
1065 thread_id,
1066 now,
1067 num_args,
1068 arg_names,
1069 arg_types,
1070 arg_values,
1071 convertable_values,
1072 flags);
1073 }
1074
1075 TraceEventHandle TraceLog::AddTraceEventWithBindId(
1076 char phase,
1077 const unsigned char* category_group_enabled,
1078 const char* name,
1079 unsigned long long id,
1080 unsigned long long bind_id,
1081 int num_args,
1082 const char** arg_names,
1083 const unsigned char* arg_types,
1084 const unsigned long long* arg_values,
1085 const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
1086 unsigned int flags) {
1087 int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
1088 base::TimeTicks now = base::TimeTicks::Now();
1089 return AddTraceEventWithThreadIdAndTimestamp(
1090 phase,
1091 category_group_enabled,
1092 name,
1093 id,
1094 bind_id,
1095 thread_id,
1096 now,
1097 num_args,
1098 arg_names,
1099 arg_types,
1100 arg_values,
1101 convertable_values,
1102 flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID);
1103 }
1104
1105 TraceEventHandle TraceLog::AddTraceEventWithProcessId(
1106 char phase,
1107 const unsigned char* category_group_enabled,
1108 const char* name,
1109 unsigned long long id,
1110 int process_id,
1111 int num_args,
1112 const char** arg_names,
1113 const unsigned char* arg_types,
1114 const unsigned long long* arg_values,
1115 const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
1116 unsigned int flags) {
1117 base::TimeTicks now = base::TimeTicks::Now();
1118 return AddTraceEventWithThreadIdAndTimestamp(
1119 phase,
1120 category_group_enabled,
1121 name,
1122 id,
1123 trace_event_internal::kNoId, // bind_id
1124 process_id,
1125 now,
1126 num_args,
1127 arg_names,
1128 arg_types,
1129 arg_values,
1130 convertable_values,
1131 flags | TRACE_EVENT_FLAG_HAS_PROCESS_ID);
1132 }
1133
1134 // Handle legacy calls to AddTraceEventWithThreadIdAndTimestamp
1135 // with kNoId as bind_id
1136 TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
1137 char phase,
1138 const unsigned char* category_group_enabled,
1139 const char* name,
1140 unsigned long long id,
1141 int thread_id,
1142 const TimeTicks& timestamp,
1143 int num_args,
1144 const char** arg_names,
1145 const unsigned char* arg_types,
1146 const unsigned long long* arg_values,
1147 const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
1148 unsigned int flags) {
1149 return AddTraceEventWithThreadIdAndTimestamp(
1150 phase,
1151 category_group_enabled,
1152 name,
1153 id,
1154 trace_event_internal::kNoId, // bind_id
1155 thread_id,
1156 timestamp,
1157 num_args,
1158 arg_names,
1159 arg_types,
1160 arg_values,
1161 convertable_values,
1162 flags);
1163 }
1164
1165 TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
1166 char phase,
1167 const unsigned char* category_group_enabled,
1168 const char* name,
1169 unsigned long long id,
1170 unsigned long long bind_id,
1171 int thread_id,
1172 const TimeTicks& timestamp,
1173 int num_args,
1174 const char** arg_names,
1175 const unsigned char* arg_types,
1176 const unsigned long long* arg_values,
1177 const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
1178 unsigned int flags) {
1179 TraceEventHandle handle = {0, 0, 0};
1180 if (!*category_group_enabled)
1181 return handle;
1182
1183 // Avoid re-entrance of AddTraceEvent. This may happen in the GPU process when
1184 // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
1185 // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
1186 if (thread_is_in_trace_event_.Get())
1187 return handle;
1188
1189 AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
1190
1191 DCHECK(name);
1192 DCHECK(!timestamp.is_null());
1193
1194 if (flags & TRACE_EVENT_FLAG_MANGLE_ID) {
1195 if ((flags & TRACE_EVENT_FLAG_FLOW_IN) ||
1196 (flags & TRACE_EVENT_FLAG_FLOW_OUT))
1197 bind_id = MangleEventId(bind_id);
1198 id = MangleEventId(id);
1199 }
1200
1201 TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);
1202 ThreadTicks thread_now = ThreadNow();
1203
1204 // |thread_local_event_buffer_| can be null if the current thread doesn't have
1205 // a message loop or the message loop is blocked.
1206 InitializeThreadLocalEventBufferIfSupported();
1207 auto thread_local_event_buffer = thread_local_event_buffer_.Get();
1208
1209 // Check and update the current thread name only if the event is for the
1210 // current thread to avoid locks in most cases.
1211 if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
1212 const char* new_name =
1213 ThreadIdNameManager::GetInstance()->GetName(thread_id);
1214 // Check if the thread name has been set or changed since the previous
1215 // call (if any), but don't bother if the new name is empty. Note this will
1216 // not detect a thread name change within the same char* buffer address: we
1217 // favor common case performance over corner case correctness.
1218 if (new_name != g_current_thread_name.Get().Get() && new_name &&
1219 *new_name) {
1220 g_current_thread_name.Get().Set(new_name);
1221
1222 AutoLock thread_info_lock(thread_info_lock_);
1223
1224 hash_map<int, std::string>::iterator existing_name =
1225 thread_names_.find(thread_id);
1226 if (existing_name == thread_names_.end()) {
1227 // This is a new thread id, and a new name.
1228 thread_names_[thread_id] = new_name;
1229 } else {
1230 // This is a thread id that we've seen before, but potentially with a
1231 // new name.
1232 std::vector<StringPiece> existing_names = base::SplitStringPiece(
1233 existing_name->second, ",", base::KEEP_WHITESPACE,
1234 base::SPLIT_WANT_NONEMPTY);
1235 bool found = std::find(existing_names.begin(), existing_names.end(),
1236 new_name) != existing_names.end();
1237 if (!found) {
1238 if (existing_names.size())
1239 existing_name->second.push_back(',');
1240 existing_name->second.append(new_name);
1241 }
1242 }
1243 }
1244 }
1245
1246 #if defined(OS_WIN)
1247 // This is done sooner rather than later, to avoid creating the event and
1248 // acquiring the lock, which is not needed for ETW as it's already threadsafe.
1249 if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT)
1250 TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
1251 num_args, arg_names, arg_types, arg_values,
1252 convertable_values);
1253 #endif // OS_WIN
1254
1255 std::string console_message;
1256 if (*category_group_enabled &
1257 (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
1258 OptionalAutoLock lock(&lock_);
1259
1260 TraceEvent* trace_event = NULL;
1261 if (thread_local_event_buffer) {
1262 trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
1263 } else {
1264 lock.EnsureAcquired();
1265 trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
1266 }
1267
1268 if (trace_event) {
1269 trace_event->Initialize(thread_id,
1270 offset_event_timestamp,
1271 thread_now,
1272 phase,
1273 category_group_enabled,
1274 name,
1275 id,
1276 bind_id,
1277 num_args,
1278 arg_names,
1279 arg_types,
1280 arg_values,
1281 convertable_values,
1282 flags);
1283
1284 #if defined(OS_ANDROID)
1285 trace_event->SendToATrace();
1286 #endif
1287 }
1288
1289 if (trace_options() & kInternalEchoToConsole) {
1290 console_message = EventToConsoleMessage(
1291 phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
1292 timestamp, trace_event);
1293 }
1294 }
1295
1296 if (console_message.size())
1297 LOG(ERROR) << console_message;
1298
1299 if (reinterpret_cast<const unsigned char*>(
1300 subtle::NoBarrier_Load(&watch_category_)) == category_group_enabled) {
1301 bool event_name_matches;
1302 WatchEventCallback watch_event_callback_copy;
1303 {
1304 AutoLock lock(lock_);
1305 event_name_matches = watch_event_name_ == name;
1306 watch_event_callback_copy = watch_event_callback_;
1307 }
1308 if (event_name_matches) {
1309 if (!watch_event_callback_copy.is_null())
1310 watch_event_callback_copy.Run();
1311 }
1312 }
1313
1314 if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
1315 EventCallback event_callback = reinterpret_cast<EventCallback>(
1316 subtle::NoBarrier_Load(&event_callback_));
1317 if (event_callback) {
1318 event_callback(
1319 offset_event_timestamp,
1320 phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
1321 category_group_enabled, name, id, num_args, arg_names, arg_types,
1322 arg_values, flags);
1323 }
1324 }
1325
1326 if (base::trace_event::AllocationContextTracker::capture_enabled()) {
1327 if (phase == TRACE_EVENT_PHASE_BEGIN || phase == TRACE_EVENT_PHASE_COMPLETE)
1328 base::trace_event::AllocationContextTracker::PushPseudoStackFrame(name);
1329 else if (phase == TRACE_EVENT_PHASE_END)
1330 // The pop for |TRACE_EVENT_PHASE_COMPLETE| events
1331 // is in |TraceLog::UpdateTraceEventDuration|.
1332 base::trace_event::AllocationContextTracker::PopPseudoStackFrame(name);
1333 }
1334
1335 return handle;
1336 }
1337
1338 void TraceLog::AddMetadataEvent(
1339 const char* name,
1340 int num_args,
1341 const char** arg_names,
1342 const unsigned char* arg_types,
1343 const unsigned long long* arg_values,
1344 const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
1345 unsigned int flags) {
1346 scoped_ptr<TraceEvent> trace_event(new TraceEvent);
1347 AutoLock lock(lock_);
1348 trace_event->Initialize(
1349 0, // thread_id
1350 TimeTicks(), ThreadTicks(), TRACE_EVENT_PHASE_METADATA,
1351 &g_category_group_enabled[g_category_metadata], name,
1352 trace_event_internal::kNoId, // id
1353 trace_event_internal::kNoId, // bind_id
1354 num_args, arg_names, arg_types, arg_values, convertable_values, flags);
1355 metadata_events_.push_back(std::move(trace_event));
1356 }
1357
1358 // May be called when a COMPLETE event ends and the unfinished event has been
1359 // recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
1360 std::string TraceLog::EventToConsoleMessage(unsigned char phase,
1361 const TimeTicks& timestamp,
1362 TraceEvent* trace_event) {
1363 AutoLock thread_info_lock(thread_info_lock_);
1364
1365 // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
1366 // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
1367 DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);
1368
1369 TimeDelta duration;
1370 int thread_id =
1371 trace_event ? trace_event->thread_id() : PlatformThread::CurrentId();
1372 if (phase == TRACE_EVENT_PHASE_END) {
1373 duration = timestamp - thread_event_start_times_[thread_id].top();
1374 thread_event_start_times_[thread_id].pop();
1375 }
1376
1377 std::string thread_name = thread_names_[thread_id];
1378 if (thread_colors_.find(thread_name) == thread_colors_.end())
1379 thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;
1380
1381 std::ostringstream log;
1382 log << base::StringPrintf("%s: \x1b[0;3%dm", thread_name.c_str(),
1383 thread_colors_[thread_name]);
1384
1385 size_t depth = 0;
1386 if (thread_event_start_times_.find(thread_id) !=
1387 thread_event_start_times_.end())
1388 depth = thread_event_start_times_[thread_id].size();
1389
1390 for (size_t i = 0; i < depth; ++i)
1391 log << "| ";
1392
1393 if (trace_event)
1394 trace_event->AppendPrettyPrinted(&log);
1395 if (phase == TRACE_EVENT_PHASE_END)
1396 log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());
1397
1398 log << "\x1b[0;m";
1399
1400 if (phase == TRACE_EVENT_PHASE_BEGIN)
1401 thread_event_start_times_[thread_id].push(timestamp);
1402
1403 return log.str();
1404 }
1405
1406 void TraceLog::UpdateTraceEventDuration(
1407 const unsigned char* category_group_enabled,
1408 const char* name,
1409 TraceEventHandle handle) {
1410 char category_group_enabled_local = *category_group_enabled;
1411 if (!category_group_enabled_local)
1412 return;
1413
1414 // Avoid re-entrance of AddTraceEvent. This may happen in the GPU process when
1415 // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
1416 // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
1417 if (thread_is_in_trace_event_.Get())
1418 return;
1419
1420 AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
1421
1422 ThreadTicks thread_now = ThreadNow();
1423 TimeTicks now = OffsetNow();
1424
1425 #if defined(OS_WIN)
1426 // Generate an ETW event that marks the end of a complete event.
1427 if (category_group_enabled_local & ENABLED_FOR_ETW_EXPORT)
1428 TraceEventETWExport::AddCompleteEndEvent(name);
1429 #endif // OS_WIN
1430
1431 std::string console_message;
1432 if (category_group_enabled_local & ENABLED_FOR_RECORDING) {
1433 OptionalAutoLock lock(&lock_);
1434
1435 TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
1436 if (trace_event) {
1437 DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
1438 trace_event->UpdateDuration(now, thread_now);
1439 #if defined(OS_ANDROID)
1440 trace_event->SendToATrace();
1441 #endif
1442 }
1443
1444 if (trace_options() & kInternalEchoToConsole) {
1445 console_message =
1446 EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
1447 }
1448
1449 if (base::trace_event::AllocationContextTracker::capture_enabled()) {
1450 // The corresponding push is in |AddTraceEventWithThreadIdAndTimestamp|.
1451 base::trace_event::AllocationContextTracker::PopPseudoStackFrame(name);
1452 }
1453 }
1454
1455 if (console_message.size())
1456 LOG(ERROR) << console_message;
1457
1458 if (category_group_enabled_local & ENABLED_FOR_EVENT_CALLBACK) {
1459 EventCallback event_callback = reinterpret_cast<EventCallback>(
1460 subtle::NoBarrier_Load(&event_callback_));
1461 if (event_callback) {
1462 event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
1463 trace_event_internal::kNoId, 0,
1464 nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
1465 }
1466 }
1467 }
1468
1469 void TraceLog::SetWatchEvent(const std::string& category_name,
1470 const std::string& event_name,
1471 const WatchEventCallback& callback) {
1472 const unsigned char* category =
1473 GetCategoryGroupEnabled(category_name.c_str());
1474 AutoLock lock(lock_);
1475 subtle::NoBarrier_Store(&watch_category_,
1476 reinterpret_cast<subtle::AtomicWord>(category));
1477 watch_event_name_ = event_name;
1478 watch_event_callback_ = callback;
1479 }
1480
1481 void TraceLog::CancelWatchEvent() {
1482 AutoLock lock(lock_);
1483 subtle::NoBarrier_Store(&watch_category_, 0);
1484 watch_event_name_ = "";
1485 watch_event_callback_.Reset();
1486 }
1487
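// Sketch of the mangling scheme: the id is XORed with |process_id_hash_|, the
// FNV hash of the process ID computed in SetProcessID() below, so that equal
// local ids emitted by different processes are unlikely to collide once
// mangled (used when TRACE_EVENT_FLAG_MANGLE_ID is set).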
1488 uint64_t TraceLog::MangleEventId(uint64_t id) {
1489 return id ^ process_id_hash_;
1490 }
1491
1492 void TraceLog::AddMetadataEventsWhileLocked() {
1493 lock_.AssertAcquired();
1494
1495 // Copy metadata added by |AddMetadataEvent| into the trace log.
1496 for (const scoped_ptr<TraceEvent>& event : metadata_events_)
1497 AddEventToThreadSharedChunkWhileLocked(nullptr, false)->CopyFrom(*event);
1498
1499 #if !defined(OS_NACL) // NaCl shouldn't expose the process id.
1500 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
1501 0, "num_cpus", "number",
1502 base::SysInfo::NumberOfProcessors());
1503 #endif
1504
1505 int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
1506 if (process_sort_index_ != 0) {
1507 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
1508 current_thread_id, "process_sort_index",
1509 "sort_index", process_sort_index_);
1510 }
1511
1512 if (process_name_.size()) {
1513 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
1514 current_thread_id, "process_name", "name",
1515 process_name_);
1516 }
1517
1518 if (process_labels_.size() > 0) {
1519 std::vector<std::string> labels;
1520 for (base::hash_map<int, std::string>::iterator it =
1521 process_labels_.begin();
1522 it != process_labels_.end(); it++) {
1523 labels.push_back(it->second);
1524 }
1525 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
1526 current_thread_id, "process_labels", "labels",
1527 base::JoinString(labels, ","));
1528 }
1529
1530 // Thread sort indices.
1531 for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
1532 it != thread_sort_indices_.end(); it++) {
1533 if (it->second == 0)
1534 continue;
1535 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
1536 it->first, "thread_sort_index", "sort_index",
1537 it->second);
1538 }
1539
1540 // Thread names.
1541 AutoLock thread_info_lock(thread_info_lock_);
1542 for (hash_map<int, std::string>::iterator it = thread_names_.begin();
1543 it != thread_names_.end(); it++) {
1544 if (it->second.empty())
1545 continue;
1546 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
1547 it->first, "thread_name", "name", it->second);
1548 }
1549
1550 // If buffer is full, add a metadata record to report this.
1551 if (!buffer_limit_reached_timestamp_.is_null()) {
1552 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
1553 current_thread_id, "trace_buffer_overflowed",
1554 "overflowed_at_ts",
1555 buffer_limit_reached_timestamp_);
1556 }
1557 }
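
// For reference, a "process_name" metadata record emitted above shows up in
// the exported JSON roughly as below. This is an illustrative sketch of the
// trace-event metadata format (phase "M"), not output captured from this
// code; field order and the pid/tid values will differ.
//
//   {"ph": "M", "name": "process_name", "pid": 1234, "tid": 5678,
//    "args": {"name": "Browser"}}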

void TraceLog::WaitSamplingEventForTesting() {
  if (!sampling_thread_)
    return;
  sampling_thread_->WaitSamplingEventForTesting();
}

void TraceLog::DeleteForTesting() {
  internal::DeleteTraceLogForTesting::Delete();
}

TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
  return GetEventByHandleInternal(handle, NULL);
}

TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
                                               OptionalAutoLock* lock) {
  if (!handle.chunk_seq)
    return NULL;

  if (thread_local_event_buffer_.Get()) {
    TraceEvent* trace_event =
        thread_local_event_buffer_.Get()->GetEventByHandle(handle);
    if (trace_event)
      return trace_event;
  }

  // The event is no longer in the thread local buffer. Try to get the event
  // from the main buffer with a lock.
  if (lock)
    lock->EnsureAcquired();

  if (thread_shared_chunk_ &&
      handle.chunk_index == thread_shared_chunk_index_) {
    return handle.chunk_seq == thread_shared_chunk_->seq()
               ? thread_shared_chunk_->GetEventAt(handle.event_index)
               : NULL;
  }

  return logged_events_->GetEventByHandle(handle);
}

void TraceLog::SetProcessID(int process_id) {
  process_id_ = process_id;
  // Create a FNV hash from the process ID for XORing.
  // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
  unsigned long long offset_basis = 14695981039346656037ull;
  unsigned long long fnv_prime = 1099511628211ull;
  unsigned long long pid = static_cast<unsigned long long>(process_id_);
  process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
}
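
// The hash above is a single xor-then-multiply round of the 64-bit FNV hash
// (FNV-1a ordering), applied to the whole pid at once rather than byte by
// byte. For comparison, a conventional byte-wise FNV-1a hash looks roughly
// like this (an illustrative sketch; Fnv1a64 is not a function in this file):
//
//   uint64_t Fnv1a64(const unsigned char* data, size_t size) {
//     uint64_t hash = 14695981039346656037ull;  // FNV offset basis.
//     for (size_t i = 0; i < size; ++i) {
//       hash ^= data[i];           // XOR in the next byte,
//       hash *= 1099511628211ull;  // then multiply by the FNV prime.
//     }
//     return hash;
//   }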

void TraceLog::SetProcessSortIndex(int sort_index) {
  AutoLock lock(lock_);
  process_sort_index_ = sort_index;
}

void TraceLog::SetProcessName(const std::string& process_name) {
  AutoLock lock(lock_);
  process_name_ = process_name;
}

void TraceLog::UpdateProcessLabel(int label_id,
                                  const std::string& current_label) {
  if (!current_label.length())
    return RemoveProcessLabel(label_id);

  AutoLock lock(lock_);
  process_labels_[label_id] = current_label;
}

void TraceLog::RemoveProcessLabel(int label_id) {
  AutoLock lock(lock_);
  base::hash_map<int, std::string>::iterator it =
      process_labels_.find(label_id);
  if (it == process_labels_.end())
    return;

  process_labels_.erase(it);
}

void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
  AutoLock lock(lock_);
  thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
}

void TraceLog::SetTimeOffset(TimeDelta offset) {
  time_offset_ = offset;
}

size_t TraceLog::GetObserverCountForTest() const {
  return enabled_state_observer_list_.size();
}

void TraceLog::SetCurrentThreadBlocksMessageLoop() {
  thread_blocks_message_loop_.Set(true);
  if (thread_local_event_buffer_.Get()) {
    // This will flush the thread local buffer.
    delete thread_local_event_buffer_.Get();
  }
}

TraceBuffer* TraceLog::CreateTraceBuffer() {
  InternalTraceOptions options = trace_options();
  if (options & kInternalRecordContinuously)
    return TraceBuffer::CreateTraceBufferRingBuffer(
        kTraceEventRingBufferChunks);
  else if ((options & kInternalEnableSampling) && mode_ == MONITORING_MODE)
    return TraceBuffer::CreateTraceBufferRingBuffer(
        kMonitorTraceEventBufferChunks);
  else if (options & kInternalEchoToConsole)
    return TraceBuffer::CreateTraceBufferRingBuffer(
        kEchoToConsoleTraceEventBufferChunks);
  else if (options & kInternalRecordAsMuchAsPossible)
    return TraceBuffer::CreateTraceBufferVectorOfSize(
        kTraceEventVectorBigBufferChunks);
  return TraceBuffer::CreateTraceBufferVectorOfSize(
      kTraceEventVectorBufferChunks);
}
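
// The branches above map trace options onto buffer shapes as follows. The
// quoted strings are the TraceConfig option spellings that normally select
// each mode (listed here for orientation; the exact spellings live in
// trace_config.cc and may change between revisions):
//
//   "record-continuously"          -> ring buffer, kTraceEventRingBufferChunks
//   "enable-sampling" (monitoring) -> ring buffer, kMonitorTraceEventBufferChunks
//   "trace-to-console"             -> ring buffer, kEchoToConsoleTraceEventBufferChunks
//   "record-as-much-as-possible"   -> vector, kTraceEventVectorBigBufferChunks
//   default ("record-until-full")  -> vector, kTraceEventVectorBufferChunks
//
// For example, enabling tracing with
//   TraceLog::GetInstance()->SetEnabled(
//       TraceConfig("cc,gpu", "record-continuously"),
//       TraceLog::RECORDING_MODE);
// ends up using the continuous ring buffer.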

#if defined(OS_WIN)
void TraceLog::UpdateETWCategoryGroupEnabledFlags() {
  AutoLock lock(lock_);
  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
  // Go through each category and set/clear the ETW bit depending on whether
  // the category is enabled.
  for (size_t i = 0; i < category_index; i++) {
    const char* category_group = g_category_groups[i];
    DCHECK(category_group);
    if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
            category_group)) {
      g_category_group_enabled[i] |= ENABLED_FOR_ETW_EXPORT;
    } else {
      g_category_group_enabled[i] &= ~ENABLED_FOR_ETW_EXPORT;
    }
  }
}
#endif  // defined(OS_WIN)

void ConvertableToTraceFormat::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  overhead->Add("ConvertableToTraceFormat(Unknown)", sizeof(*this));
}
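
// The default implementation above only accounts for the object itself.
// Subclasses normally override AppendAsTraceFormat() and, when they own extra
// storage, EstimateTraceMemoryOverhead() too. A minimal sketch of such a
// subclass (the class name and |json_| member are made up; ref-counting and
// destructor plumbing are omitted):
//
//   class JsonBlobValue : public ConvertableToTraceFormat {
//    public:
//     explicit JsonBlobValue(const std::string& json) : json_(json) {}
//     void AppendAsTraceFormat(std::string* out) const override {
//       out->append(json_);  // |json_| must already be valid JSON.
//     }
//     void EstimateTraceMemoryOverhead(
//         TraceEventMemoryOverhead* overhead) override {
//       overhead->Add("JsonBlobValue", sizeof(*this) + json_.capacity());
//     }
//
//    private:
//     std::string json_;
//   };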

}  // namespace trace_event
}  // namespace base

namespace trace_event_internal {

ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
    const char* category_group,
    const char* name) {
  // The single atom works because for now the category_group can only be "gpu".
  DCHECK_EQ(strcmp(category_group, "gpu"), 0);
  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
      category_group, atomic, category_group_enabled_);
  name_ = name;
  if (*category_group_enabled_) {
    event_handle_ =
        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
            TRACE_EVENT_PHASE_COMPLETE,
            category_group_enabled_,
            name,
            trace_event_internal::kNoId,  // id
            static_cast<int>(base::PlatformThread::CurrentId()),  // thread_id
            base::TimeTicks::Now(),
            trace_event_internal::kZeroNumArgs,
            nullptr,
            nullptr,
            nullptr,
            nullptr,
            TRACE_EVENT_FLAG_NONE);
  }
}

ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
                                                event_handle_);
  }
}
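
// ScopedTraceBinaryEfficient is normally reached through the
// TRACE_EVENT_BINARY_EFFICIENT0 macro rather than constructed directly; the
// scoped object above emits one COMPLETE event spanning its lifetime. A usage
// sketch (MyGpuWorkItem is a made-up function; per the DCHECK in the
// constructor, only the "gpu" category group is supported here):
//
//   void MyGpuWorkItem() {
//     TRACE_EVENT_BINARY_EFFICIENT0("gpu", "MyGpuWorkItem");
//     // ... work measured by the scoped event ...
//   }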

}  // namespace trace_event_internal