// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_log.h"

#include <algorithm>
#include <cmath>
#include <memory>
#include <utility>

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/leak_annotations.h"
#include "base/location.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted_memory.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_current.h"
#include "base/no_destructor.h"
#include "base/process/process_info.h"
#include "base/process/process_metrics.h"
#include "base/stl_util.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
#include "base/task_scheduler/post_task.h"
#include "base/threading/platform_thread.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/trace_event/category_registry.h"
#include "base/trace_event/event_name_filter.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/heap_profiler_event_filter.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"

#if defined(OS_WIN)
#include "base/trace_event/trace_event_etw_export_win.h"
#endif

#if defined(OS_ANDROID)
// The linker assigns the virtual address of the start of the current library
// to this symbol.
extern char __executable_start;
#endif

namespace base {
namespace trace_event {

namespace {

// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;

const size_t kTraceEventVectorBigBufferChunks =
    512000000 / kTraceBufferChunkSize;
static_assert(
    kTraceEventVectorBigBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
    "Too many big buffer chunks");
const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
static_assert(
    kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
    "Too many vector buffer chunks");
const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;

// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
const size_t kEchoToConsoleTraceEventBufferChunks = 256;

const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
const int kThreadFlushTimeoutMs = 3000;

TraceLog* g_trace_log_for_testing = nullptr;

#define MAX_TRACE_EVENT_FILTERS 32

// List of TraceEventFilter objects from the most recent tracing session.
std::vector<std::unique_ptr<TraceEventFilter>>& GetCategoryGroupFilters() {
  static auto* filters = new std::vector<std::unique_ptr<TraceEventFilter>>();
  return *filters;
}
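// Note: the vector above is heap-allocated once and intentionally never
// freed, so it stays valid during shutdown while late trace events may still
// be consulting the installed filters.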

ThreadTicks ThreadNow() {
  return ThreadTicks::IsSupported()
             ? base::subtle::ThreadTicksNowIgnoringOverride()
             : ThreadTicks();
}

template <typename T>
void InitializeMetadataEvent(TraceEvent* trace_event,
                             int thread_id,
                             const char* metadata_name,
                             const char* arg_name,
                             const T& value) {
  if (!trace_event)
    return;

  int num_args = 1;
  unsigned char arg_type;
  unsigned long long arg_value;
  ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
  trace_event->Initialize(
      thread_id,
      TimeTicks(),
      ThreadTicks(),
      TRACE_EVENT_PHASE_METADATA,
      CategoryRegistry::kCategoryMetadata->state_ptr(),
      metadata_name,
      trace_event_internal::kGlobalScope,  // scope
      trace_event_internal::kNoId,  // id
      trace_event_internal::kNoId,  // bind_id
      num_args,
      &arg_name,
      &arg_type,
      &arg_value,
      nullptr,
      TRACE_EVENT_FLAG_NONE);
}
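// Illustrative call shape, as used when emitting per-thread metadata at the
// end of a trace (the values here are hypothetical):
//   InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(...),
//                           thread_id, "thread_name", "name", "Compositor");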

class AutoThreadLocalBoolean {
 public:
  explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
      : thread_local_boolean_(thread_local_boolean) {
    DCHECK(!thread_local_boolean_->Get());
    thread_local_boolean_->Set(true);
  }
  ~AutoThreadLocalBoolean() { thread_local_boolean_->Set(false); }

 private:
  ThreadLocalBoolean* thread_local_boolean_;
  DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
};
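// This RAII guard is used with |thread_is_in_trace_event_| in
// AddTraceEventWithThreadIdAndTimestamp() below to reject re-entrant trace
// event emission on the same thread (e.g. when logging itself traces).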

// Use this function instead of TraceEventHandle constructor to keep the
// overhead of the ScopedTracer (trace_event.h) constructor to a minimum.
void MakeHandle(uint32_t chunk_seq,
                size_t chunk_index,
                size_t event_index,
                TraceEventHandle* handle) {
  DCHECK(chunk_seq);
  DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex);
  DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize);
  DCHECK(chunk_index <= std::numeric_limits<uint16_t>::max());
  handle->chunk_seq = chunk_seq;
  handle->chunk_index = static_cast<uint16_t>(chunk_index);
  handle->event_index = static_cast<uint16_t>(event_index);
}
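// The (chunk_seq, chunk_index, event_index) triple stored in the handle lets
// ThreadLocalEventBuffer::GetEventByHandle() find the event again later, and
// lets it detect stale handles whose chunk has since been recycled.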

template <typename Function>
void ForEachCategoryFilter(const unsigned char* category_group_enabled,
                           Function filter_fn) {
  const TraceCategory* category =
      CategoryRegistry::GetCategoryByStatePtr(category_group_enabled);
  uint32_t filter_bitmap = category->enabled_filters();
  for (int index = 0; filter_bitmap != 0; filter_bitmap >>= 1, index++) {
    if (filter_bitmap & 1 && GetCategoryGroupFilters()[index])
      filter_fn(GetCategoryGroupFilters()[index].get());
  }
}
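// Callers pass a lambda that receives each filter active for the category,
// e.g. (see EndFilteredEvent() below):
//   ForEachCategoryFilter(category_group_enabled,
//                         [&](TraceEventFilter* filter) {
//                           filter->EndEvent(category_name, name);
//                         });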

}  // namespace

// A helper class that allows the lock to be acquired in the middle of the scope
// and unlocks at the end of scope if locked.
class TraceLog::OptionalAutoLock {
 public:
  explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {}

  ~OptionalAutoLock() {
    if (locked_)
      lock_->Release();
  }

  void EnsureAcquired() {
    if (!locked_) {
      lock_->Acquire();
      locked_ = true;
    }
  }

 private:
  Lock* lock_;
  bool locked_;
  DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
};
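// Typical use (see AddTraceEventWithThreadIdAndTimestamp() below): construct
// the OptionalAutoLock unconditionally, then call EnsureAcquired() only on
// the slow path that actually touches shared state, so the common path stays
// lock-free.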

class TraceLog::ThreadLocalEventBuffer
    : public MessageLoopCurrent::DestructionObserver,
      public MemoryDumpProvider {
 public:
  explicit ThreadLocalEventBuffer(TraceLog* trace_log);
  ~ThreadLocalEventBuffer() override;

  TraceEvent* AddTraceEvent(TraceEventHandle* handle);

  TraceEvent* GetEventByHandle(TraceEventHandle handle) {
    if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
        handle.chunk_index != chunk_index_) {
      return nullptr;
    }

    return chunk_->GetEventAt(handle.event_index);
  }

  int generation() const { return generation_; }

 private:
  // MessageLoopCurrent::DestructionObserver
  void WillDestroyCurrentMessageLoop() override;

  // MemoryDumpProvider implementation.
  bool OnMemoryDump(const MemoryDumpArgs& args,
                    ProcessMemoryDump* pmd) override;

  void FlushWhileLocked();

  void CheckThisIsCurrentBuffer() const {
    DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
  }

  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
  // as long as the thread exists.
  TraceLog* trace_log_;
  std::unique_ptr<TraceBufferChunk> chunk_;
  size_t chunk_index_;
  int generation_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
};

TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
    : trace_log_(trace_log),
      chunk_index_(0),
      generation_(trace_log->generation()) {
  // ThreadLocalEventBuffer is created only if the thread has a message loop, so
  // the following message_loop won't be NULL.
  MessageLoop* message_loop = MessageLoop::current();
  message_loop->AddDestructionObserver(this);

  // This is to report the local memory usage when memory-infra is enabled.
  MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, "ThreadLocalEventBuffer", ThreadTaskRunnerHandle::Get());

  AutoLock lock(trace_log->lock_);
  trace_log->thread_message_loops_.insert(message_loop);
}

TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
  CheckThisIsCurrentBuffer();
  MessageLoop::current()->RemoveDestructionObserver(this);
  MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);

  {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    trace_log_->thread_message_loops_.erase(MessageLoop::current());
  }
  trace_log_->thread_local_event_buffer_.Set(nullptr);
}

TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
    TraceEventHandle* handle) {
  CheckThisIsCurrentBuffer();

  if (chunk_ && chunk_->IsFull()) {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    chunk_.reset();
  }
  if (!chunk_) {
    AutoLock lock(trace_log_->lock_);
    chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
    trace_log_->CheckIfBufferIsFullWhileLocked();
  }
  if (!chunk_)
    return nullptr;

  size_t event_index;
  TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle)
    MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

  return trace_event;
}

void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
  delete this;
}

bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(const MemoryDumpArgs& args,
                                                    ProcessMemoryDump* pmd) {
  if (!chunk_)
    return true;
  std::string dump_base_name = StringPrintf(
      "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId()));
  TraceEventMemoryOverhead overhead;
  chunk_->EstimateTraceMemoryOverhead(&overhead);
  overhead.DumpInto(dump_base_name.c_str(), pmd);
  return true;
}

void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
  if (!chunk_)
    return;

  trace_log_->lock_.AssertAcquired();
  if (trace_log_->CheckGeneration(generation_)) {
    // Return the chunk to the buffer only if the generation matches.
    trace_log_->logged_events_->ReturnChunk(chunk_index_, std::move(chunk_));
  }
  // Otherwise this method may be called from the destructor, or TraceLog will
  // find the generation mismatch and delete this buffer soon.
}
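// Chunk ownership handshake: a thread borrows a TraceBufferChunk from the
// central buffer via GetChunk() and fills it without holding the lock;
// ReturnChunk() hands the filled chunk back under the lock, which is why
// FlushWhileLocked() asserts that the lock is held.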

void TraceLog::SetAddTraceEventOverride(
    const AddTraceEventOverrideCallback& override) {
  subtle::NoBarrier_Store(&trace_event_override_,
                          reinterpret_cast<subtle::AtomicWord>(override));
}

struct TraceLog::RegisteredAsyncObserver {
  explicit RegisteredAsyncObserver(WeakPtr<AsyncEnabledStateObserver> observer)
      : observer(observer), task_runner(ThreadTaskRunnerHandle::Get()) {}
  ~RegisteredAsyncObserver() = default;

  WeakPtr<AsyncEnabledStateObserver> observer;
  scoped_refptr<SequencedTaskRunner> task_runner;
};

TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {}

TraceLogStatus::~TraceLogStatus() = default;

// static
TraceLog* TraceLog::GetInstance() {
  static base::NoDestructor<TraceLog> instance;
  return instance.get();
}

// static
void TraceLog::ResetForTesting() {
  if (!g_trace_log_for_testing)
    return;
  CategoryRegistry::ResetForTesting();
  g_trace_log_for_testing->~TraceLog();
  new (g_trace_log_for_testing) TraceLog;
}

TraceLog::TraceLog()
    : enabled_modes_(0),
      num_traces_recorded_(0),
      dispatching_to_observer_list_(false),
      process_sort_index_(0),
      process_id_hash_(0),
      process_id_(0),
      trace_options_(kInternalRecordUntilFull),
      trace_config_(TraceConfig()),
      thread_shared_chunk_index_(0),
      generation_(0),
      use_worker_thread_(false),
      trace_event_override_(0),
      filter_factory_for_testing_(nullptr) {
  CategoryRegistry::Initialize();

#if defined(OS_NACL)  // NaCl shouldn't expose the process id.
  SetProcessID(0);
#else
  SetProcessID(static_cast<int>(GetCurrentProcId()));
#endif

// Linux renderer processes and Android O processes are not allowed to read
// the "/proc/stat" file, see crbug.com/788870.
#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS))
  process_creation_time_ = CurrentProcessInfo::CreationTime();
#else
  // Use approximate time when creation time is not available.
  process_creation_time_ = TRACE_TIME_NOW();
#endif

  logged_events_.reset(CreateTraceBuffer());

  MemoryDumpManager::GetInstance()->RegisterDumpProvider(this, "TraceLog",
                                                         nullptr);
  g_trace_log_for_testing = this;
}

TraceLog::~TraceLog() = default;

void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
  // A ThreadLocalEventBuffer needs the message loop
  // - to know when the thread exits;
  // - to handle the final flush.
  // For a thread without a message loop, or when the message loop may be
  // blocked, trace events are added directly into the main buffer.
  if (thread_blocks_message_loop_.Get() || !MessageLoopCurrent::IsSet())
    return;
  HEAP_PROFILER_SCOPED_IGNORE;
  auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
  if (thread_local_event_buffer &&
      !CheckGeneration(thread_local_event_buffer->generation())) {
    delete thread_local_event_buffer;
    thread_local_event_buffer = nullptr;
  }
  if (!thread_local_event_buffer) {
    thread_local_event_buffer = new ThreadLocalEventBuffer(this);
    thread_local_event_buffer_.Set(thread_local_event_buffer);
  }
}

bool TraceLog::OnMemoryDump(const MemoryDumpArgs& args,
                            ProcessMemoryDump* pmd) {
  // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
  // (crbug.com/499731).
  TraceEventMemoryOverhead overhead;
  overhead.Add(TraceEventMemoryOverhead::kOther, sizeof(*this));
  {
    AutoLock lock(lock_);
    if (logged_events_)
      logged_events_->EstimateTraceMemoryOverhead(&overhead);

    for (auto& metadata_event : metadata_events_)
      metadata_event->EstimateTraceMemoryOverhead(&overhead);
  }
  overhead.AddSelf();
  overhead.DumpInto("tracing/main_trace_log", pmd);
  return true;
}

const unsigned char* TraceLog::GetCategoryGroupEnabled(
    const char* category_group) {
  TraceLog* tracelog = GetInstance();
  if (!tracelog) {
    DCHECK(!CategoryRegistry::kCategoryAlreadyShutdown->is_enabled());
    return CategoryRegistry::kCategoryAlreadyShutdown->state_ptr();
  }
  TraceCategory* category = CategoryRegistry::GetCategoryByName(category_group);
  if (!category) {
    // Slow path: in the case of a new category we have to repeat the check
    // holding the lock, as multiple threads might have reached this point
    // at the same time.
    auto category_initializer = [](TraceCategory* category) {
      TraceLog::GetInstance()->UpdateCategoryState(category);
    };
    AutoLock lock(tracelog->lock_);
    CategoryRegistry::GetOrCreateCategoryLocked(
        category_group, category_initializer, &category);
  }
  DCHECK(category->state_ptr());
  return category->state_ptr();
}

const char* TraceLog::GetCategoryGroupName(
    const unsigned char* category_group_enabled) {
  return CategoryRegistry::GetCategoryByStatePtr(category_group_enabled)
      ->name();
}

void TraceLog::UpdateCategoryState(TraceCategory* category) {
  lock_.AssertAcquired();
  DCHECK(category->is_valid());
  unsigned char state_flags = 0;
  if (enabled_modes_ & RECORDING_MODE &&
      trace_config_.IsCategoryGroupEnabled(category->name())) {
    state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
  }

  // TODO(primiano): this is a temporary workaround for catapult:#2341,
  // to guarantee that metadata events are always added even if the category
  // filter is "-*". See crbug.com/618054 for more details and long-term fix.
  if (enabled_modes_ & RECORDING_MODE &&
      category == CategoryRegistry::kCategoryMetadata) {
    state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
  }

#if defined(OS_WIN)
  if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
          category->name())) {
    state_flags |= TraceCategory::ENABLED_FOR_ETW_EXPORT;
  }
#endif

  uint32_t enabled_filters_bitmap = 0;
  int index = 0;
  for (const auto& event_filter : enabled_event_filters_) {
    if (event_filter.IsCategoryGroupEnabled(category->name())) {
      state_flags |= TraceCategory::ENABLED_FOR_FILTERING;
      DCHECK(GetCategoryGroupFilters()[index]);
      enabled_filters_bitmap |= 1 << index;
    }
    if (index++ >= MAX_TRACE_EVENT_FILTERS) {
      NOTREACHED();
      break;
    }
  }
  category->set_enabled_filters(enabled_filters_bitmap);
  category->set_state(state_flags);
}
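// Bit i of |enabled_filters_bitmap| marks GetCategoryGroupFilters()[i] as
// applicable to this category; ForEachCategoryFilter() above walks the bitmap
// to dispatch events only to the filters that opted in.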

void TraceLog::UpdateCategoryRegistry() {
  lock_.AssertAcquired();
  CreateFiltersForTraceConfig();
  for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
    UpdateCategoryState(&category);
  }
}

void TraceLog::CreateFiltersForTraceConfig() {
  if (!(enabled_modes_ & FILTERING_MODE))
    return;

  // Filters have already been added and tracing may already be enabled; the
  // filter list cannot be changed while trace events are using it.
  if (GetCategoryGroupFilters().size())
    return;

  for (auto& filter_config : enabled_event_filters_) {
    if (GetCategoryGroupFilters().size() >= MAX_TRACE_EVENT_FILTERS) {
      NOTREACHED()
          << "Too many trace event filters installed in the current session";
      break;
    }

    std::unique_ptr<TraceEventFilter> new_filter;
    const std::string& predicate_name = filter_config.predicate_name();
    if (predicate_name == EventNameFilter::kName) {
      auto whitelist = std::make_unique<std::unordered_set<std::string>>();
      CHECK(filter_config.GetArgAsSet("event_name_whitelist", &*whitelist));
      new_filter = std::make_unique<EventNameFilter>(std::move(whitelist));
    } else if (predicate_name == HeapProfilerEventFilter::kName) {
      new_filter = std::make_unique<HeapProfilerEventFilter>();
    } else {
      if (filter_factory_for_testing_)
        new_filter = filter_factory_for_testing_(predicate_name);
      CHECK(new_filter) << "Unknown trace filter " << predicate_name;
    }
    GetCategoryGroupFilters().push_back(std::move(new_filter));
  }
}

void TraceLog::GetKnownCategoryGroups(
    std::vector<std::string>* category_groups) {
  for (const auto& category : CategoryRegistry::GetAllCategories()) {
    if (!CategoryRegistry::IsBuiltinCategory(&category))
      category_groups->push_back(category.name());
  }
}

void TraceLog::SetEnabled(const TraceConfig& trace_config,
                          uint8_t modes_to_enable) {
  DCHECK(trace_config.process_filter_config().IsEnabled(process_id_));

  std::vector<EnabledStateObserver*> observer_list;
  std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map;
  {
    AutoLock lock(lock_);

    // Can't enable tracing when Flush() is in progress.
    DCHECK(!flush_task_runner_);

    InternalTraceOptions new_options =
        GetInternalOptionsFromTraceConfig(trace_config);

    InternalTraceOptions old_options = trace_options();

    if (dispatching_to_observer_list_) {
      // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
      DLOG(ERROR)
          << "Cannot manipulate TraceLog::Enabled state from an observer.";
      return;
    }

    // Clear all filters from the previous tracing session. These filters are
    // not cleared at the end of tracing, because threads that hit a trace
    // event while tracing is being disabled may still be using them.
    if (!enabled_modes_)
      GetCategoryGroupFilters().clear();

    // Update trace config for recording.
    const bool already_recording = enabled_modes_ & RECORDING_MODE;
    if (modes_to_enable & RECORDING_MODE) {
      if (already_recording) {
        // TODO(ssid): Stop supporting enabling of RECORDING_MODE when already
        // enabled, crbug.com/625170.
        DCHECK_EQ(new_options, old_options) << "Attempting to re-enable "
                                               "tracing with a different set "
                                               "of options.";
        trace_config_.Merge(trace_config);
      } else {
        trace_config_ = trace_config;
      }
    }

    // Update event filters only if filtering was not enabled.
    if (modes_to_enable & FILTERING_MODE && enabled_event_filters_.empty()) {
      DCHECK(!trace_config.event_filters().empty());
      enabled_event_filters_ = trace_config.event_filters();
    }
    // Keep the |trace_config_| updated with only enabled filters in case anyone
    // tries to read it using |GetCurrentTraceConfig| (even if filters are
    // empty).
    trace_config_.SetEventFilters(enabled_event_filters_);

    enabled_modes_ |= modes_to_enable;
    UpdateCategoryRegistry();

    // Do not notify observers or create trace buffer if only enabled for
    // filtering or if recording was already enabled.
    if (!(modes_to_enable & RECORDING_MODE) || already_recording)
      return;

    if (new_options != old_options) {
      subtle::NoBarrier_Store(&trace_options_, new_options);
      UseNextTraceBuffer();
    }

    num_traces_recorded_++;

    UpdateCategoryRegistry();

    dispatching_to_observer_list_ = true;
    observer_list = enabled_state_observer_list_;
    observer_map = async_observers_;
  }
  // Notify observers outside the lock in case they trigger trace events.
  for (EnabledStateObserver* observer : observer_list)
    observer->OnTraceLogEnabled();
  for (const auto& it : observer_map) {
    it.second.task_runner->PostTask(
        FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogEnabled,
                            it.second.observer));
  }

  {
    AutoLock lock(lock_);
    dispatching_to_observer_list_ = false;
  }
}
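// Illustrative enabling call (a sketch; TraceConfig's two-string constructor
// takes a category filter and a record-mode string):
//   TraceLog::GetInstance()->SetEnabled(
//       TraceConfig("cat1,cat2", "record-until-full"),
//       TraceLog::RECORDING_MODE);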

void TraceLog::SetArgumentFilterPredicate(
    const ArgumentFilterPredicate& argument_filter_predicate) {
  AutoLock lock(lock_);
  DCHECK(!argument_filter_predicate.is_null());
  DCHECK(argument_filter_predicate_.is_null());
  argument_filter_predicate_ = argument_filter_predicate;
}

TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
    const TraceConfig& config) {
  InternalTraceOptions ret = config.IsArgumentFilterEnabled()
                                 ? kInternalEnableArgumentFilter
                                 : kInternalNone;
  switch (config.GetTraceRecordMode()) {
    case RECORD_UNTIL_FULL:
      return ret | kInternalRecordUntilFull;
    case RECORD_CONTINUOUSLY:
      return ret | kInternalRecordContinuously;
    case ECHO_TO_CONSOLE:
      return ret | kInternalEchoToConsole;
    case RECORD_AS_MUCH_AS_POSSIBLE:
      return ret | kInternalRecordAsMuchAsPossible;
  }
  NOTREACHED();
  return kInternalNone;
}

TraceConfig TraceLog::GetCurrentTraceConfig() const {
  AutoLock lock(lock_);
  return trace_config_;
}

void TraceLog::SetDisabled() {
  AutoLock lock(lock_);
  SetDisabledWhileLocked(RECORDING_MODE);
}

void TraceLog::SetDisabled(uint8_t modes_to_disable) {
  AutoLock lock(lock_);
  SetDisabledWhileLocked(modes_to_disable);
}

void TraceLog::SetDisabledWhileLocked(uint8_t modes_to_disable) {
  lock_.AssertAcquired();

  if (!(enabled_modes_ & modes_to_disable))
    return;

  if (dispatching_to_observer_list_) {
    // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
    DLOG(ERROR)
        << "Cannot manipulate TraceLog::Enabled state from an observer.";
    return;
  }

  bool is_recording_mode_disabled =
      (enabled_modes_ & RECORDING_MODE) && (modes_to_disable & RECORDING_MODE);
  enabled_modes_ &= ~modes_to_disable;

  if (modes_to_disable & FILTERING_MODE)
    enabled_event_filters_.clear();

  if (modes_to_disable & RECORDING_MODE)
    trace_config_.Clear();

  UpdateCategoryRegistry();

  // Add metadata events and notify observers only if recording mode was
  // just disabled.
  if (!is_recording_mode_disabled)
    return;

  AddMetadataEventsWhileLocked();

  // Remove metadata events so they will not get added to a subsequent trace.
  metadata_events_.clear();

  dispatching_to_observer_list_ = true;
  std::vector<EnabledStateObserver*> observer_list =
      enabled_state_observer_list_;
  std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map =
      async_observers_;

  {
    // Dispatch to observers outside the lock in case the observer triggers a
    // trace event.
    AutoUnlock unlock(lock_);
    for (EnabledStateObserver* observer : observer_list)
      observer->OnTraceLogDisabled();
    for (const auto& it : observer_map) {
      it.second.task_runner->PostTask(
          FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogDisabled,
                              it.second.observer));
    }
  }
  dispatching_to_observer_list_ = false;
}

int TraceLog::GetNumTracesRecorded() {
  AutoLock lock(lock_);
  if (!IsEnabled())
    return -1;
  return num_traces_recorded_;
}

void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
  AutoLock lock(lock_);
  enabled_state_observer_list_.push_back(listener);
}

void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
  AutoLock lock(lock_);
  std::vector<EnabledStateObserver*>::iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(), listener);
  if (it != enabled_state_observer_list_.end())
    enabled_state_observer_list_.erase(it);
}

bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
  AutoLock lock(lock_);
  return ContainsValue(enabled_state_observer_list_, listener);
}

TraceLogStatus TraceLog::GetStatus() const {
  AutoLock lock(lock_);
  TraceLogStatus result;
  result.event_capacity = static_cast<uint32_t>(logged_events_->Capacity());
  result.event_count = static_cast<uint32_t>(logged_events_->Size());
  return result;
}

bool TraceLog::BufferIsFull() const {
  AutoLock lock(lock_);
  return logged_events_->IsFull();
}

TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
    TraceEventHandle* handle,
    bool check_buffer_is_full) {
  lock_.AssertAcquired();

  if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
    logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                std::move(thread_shared_chunk_));
  }

  if (!thread_shared_chunk_) {
    thread_shared_chunk_ =
        logged_events_->GetChunk(&thread_shared_chunk_index_);
    if (check_buffer_is_full)
      CheckIfBufferIsFullWhileLocked();
  }
  if (!thread_shared_chunk_)
    return nullptr;

  size_t event_index;
  TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle) {
    MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
               event_index, handle);
  }
  return trace_event;
}

void TraceLog::CheckIfBufferIsFullWhileLocked() {
  lock_.AssertAcquired();
  if (logged_events_->IsFull()) {
    if (buffer_limit_reached_timestamp_.is_null()) {
      buffer_limit_reached_timestamp_ = OffsetNow();
    }
    SetDisabledWhileLocked(RECORDING_MODE);
  }
}

// Flush() works as follows:
// 1. Flush() is called in thread A whose task runner is saved in
//    flush_task_runner_;
// 2. If thread_message_loops_ is not empty, thread A posts a task to each
//    message loop to flush the thread local buffers; otherwise finish the
//    flush;
// 3. FlushCurrentThread() deletes the thread local event buffer:
//    - The last batch of events of the thread are flushed into the main buffer;
//    - The message loop will be removed from thread_message_loops_;
//    If this is the last message loop, finish the flush;
// 4. If any thread hasn't finished its flush in time, finish the flush.
void TraceLog::Flush(const TraceLog::OutputCallback& cb,
                     bool use_worker_thread) {
  FlushInternal(cb, use_worker_thread, false);
}
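// Sketch of a flush callback (hypothetical consumer): the callback receives
// successive JSON fragments, with |has_more_events| true until the last one.
//   TraceLog::GetInstance()->Flush(BindRepeating(
//       [](const scoped_refptr<RefCountedString>& json, bool has_more_events) {
//         // Append json->data() to the output; done when !has_more_events.
//       }));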

void TraceLog::CancelTracing(const OutputCallback& cb) {
  SetDisabled();
  FlushInternal(cb, false, true);
}

void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
                             bool use_worker_thread,
                             bool discard_events) {
  use_worker_thread_ = use_worker_thread;
  if (IsEnabled()) {
    // Can't flush when tracing is enabled because otherwise PostTask would
    // - generate more trace events;
    // - deschedule the calling thread on some platforms causing inaccurate
    //   timing of the trace events.
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
    if (!cb.is_null())
      cb.Run(empty_result, false);
    LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
    return;
  }

  int gen = generation();
  // Copy of thread_message_loops_ to be used without locking.
  std::vector<scoped_refptr<SingleThreadTaskRunner>>
      thread_message_loop_task_runners;
  {
    AutoLock lock(lock_);
    DCHECK(!flush_task_runner_);
    flush_task_runner_ = SequencedTaskRunnerHandle::IsSet()
                             ? SequencedTaskRunnerHandle::Get()
                             : nullptr;
    DCHECK(thread_message_loops_.empty() || flush_task_runner_);
    flush_output_callback_ = cb;

    if (thread_shared_chunk_) {
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  std::move(thread_shared_chunk_));
    }

    for (MessageLoop* loop : thread_message_loops_)
      thread_message_loop_task_runners.push_back(loop->task_runner());
  }

  if (!thread_message_loop_task_runners.empty()) {
    for (auto& task_runner : thread_message_loop_task_runners) {
      task_runner->PostTask(
          FROM_HERE, BindOnce(&TraceLog::FlushCurrentThread, Unretained(this),
                              gen, discard_events));
    }
    flush_task_runner_->PostDelayedTask(
        FROM_HERE,
        BindOnce(&TraceLog::OnFlushTimeout, Unretained(this), gen,
                 discard_events),
        TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
    return;
  }

  FinishFlush(gen, discard_events);
}

// Usually it runs on a different thread.
void TraceLog::ConvertTraceEventsToTraceFormat(
    std::unique_ptr<TraceBuffer> logged_events,
    const OutputCallback& flush_output_callback,
    const ArgumentFilterPredicate& argument_filter_predicate) {
  if (flush_output_callback.is_null())
    return;

  HEAP_PROFILER_SCOPED_IGNORE;
  // The callback needs to be called at least once even if there are no events,
  // to let the caller know the flush has completed.
  scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
  const size_t kReserveCapacity = kTraceEventBufferSizeInBytes * 5 / 4;
  json_events_str_ptr->data().reserve(kReserveCapacity);
  while (const TraceBufferChunk* chunk = logged_events->NextChunk()) {
    for (size_t j = 0; j < chunk->size(); ++j) {
      size_t size = json_events_str_ptr->size();
      if (size > kTraceEventBufferSizeInBytes) {
        flush_output_callback.Run(json_events_str_ptr, true);
        json_events_str_ptr = new RefCountedString();
        json_events_str_ptr->data().reserve(kReserveCapacity);
      } else if (size) {
        json_events_str_ptr->data().append(",\n");
      }
      chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()),
                                         argument_filter_predicate);
    }
  }
  flush_output_callback.Run(json_events_str_ptr, false);
}
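// Output is chunked: whenever the accumulated JSON exceeds
// kTraceEventBufferSizeInBytes the callback is invoked with
// has_more_events=true and a fresh string is started; the final invocation
// always passes has_more_events=false.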

void TraceLog::FinishFlush(int generation, bool discard_events) {
  std::unique_ptr<TraceBuffer> previous_logged_events;
  OutputCallback flush_output_callback;
  ArgumentFilterPredicate argument_filter_predicate;

  if (!CheckGeneration(generation))
    return;

  {
    AutoLock lock(lock_);

    previous_logged_events.swap(logged_events_);
    UseNextTraceBuffer();
    thread_message_loops_.clear();

    flush_task_runner_ = nullptr;
    flush_output_callback = flush_output_callback_;
    flush_output_callback_.Reset();

    if (trace_options() & kInternalEnableArgumentFilter) {
      CHECK(!argument_filter_predicate_.is_null());
      argument_filter_predicate = argument_filter_predicate_;
    }
  }

  if (discard_events) {
    if (!flush_output_callback.is_null()) {
      scoped_refptr<RefCountedString> empty_result = new RefCountedString;
      flush_output_callback.Run(empty_result, false);
    }
    return;
  }

  if (use_worker_thread_) {
    base::PostTaskWithTraits(
        FROM_HERE,
        {MayBlock(), TaskPriority::BACKGROUND,
         TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
        BindOnce(&TraceLog::ConvertTraceEventsToTraceFormat,
                 std::move(previous_logged_events), flush_output_callback,
                 argument_filter_predicate));
    return;
  }

  ConvertTraceEventsToTraceFormat(std::move(previous_logged_events),
                                  flush_output_callback,
                                  argument_filter_predicate);
}

// Run in each thread holding a local event buffer.
void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_task_runner_) {
      // This is late. The corresponding flush has finished.
      return;
    }
  }

  // This will flush the thread local buffer.
  delete thread_local_event_buffer_.Get();

  // Scheduler uses TRACE_EVENT macros when posting a task, which can lead
  // to acquiring a tracing lock. Given that posting a task requires grabbing
  // a scheduler lock, we need to post this task outside tracing lock to avoid
  // deadlocks.
  scoped_refptr<SequencedTaskRunner> cached_flush_task_runner;
  {
    AutoLock lock(lock_);
    cached_flush_task_runner = flush_task_runner_;
    if (!CheckGeneration(generation) || !flush_task_runner_ ||
        !thread_message_loops_.empty())
      return;
  }
  cached_flush_task_runner->PostTask(
      FROM_HERE, BindOnce(&TraceLog::FinishFlush, Unretained(this), generation,
                          discard_events));
}

void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_task_runner_) {
      // Flush has finished before the timeout.
      return;
    }

    LOG(WARNING)
        << "The following threads haven't finished flushing in time. "
           "If this happens regularly for some thread, please call "
           "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
           "the thread to prevent its trace events from being lost.";
    for (MessageLoop* loop : thread_message_loops_)
      LOG(WARNING) << "Thread: " << loop->GetThreadName();
  }
  FinishFlush(generation, discard_events);
}

void TraceLog::UseNextTraceBuffer() {
  logged_events_.reset(CreateTraceBuffer());
  subtle::NoBarrier_AtomicIncrement(&generation_, 1);
  thread_shared_chunk_.reset();
  thread_shared_chunk_index_ = 0;
}
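// Incrementing |generation_| invalidates every outstanding
// ThreadLocalEventBuffer: CheckGeneration() fails for them, so their stale
// chunks are dropped instead of being returned into the new buffer.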

TraceEventHandle TraceLog::AddTraceEvent(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    unsigned long long id,
    int num_args,
    const char* const* arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TimeTicks now = TRACE_TIME_TICKS_NOW();
  return AddTraceEventWithThreadIdAndTimestamp(
      phase,
      category_group_enabled,
      name,
      scope,
      id,
      trace_event_internal::kNoId,  // bind_id
      thread_id,
      now,
      num_args,
      arg_names,
      arg_types,
      arg_values,
      convertable_values,
      flags);
}

TraceEventHandle TraceLog::AddTraceEventWithBindId(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    unsigned long long id,
    unsigned long long bind_id,
    int num_args,
    const char* const* arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TimeTicks now = TRACE_TIME_TICKS_NOW();
  return AddTraceEventWithThreadIdAndTimestamp(
      phase,
      category_group_enabled,
      name,
      scope,
      id,
      bind_id,
      thread_id,
      now,
      num_args,
      arg_names,
      arg_types,
      arg_values,
      convertable_values,
      flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID);
}

TraceEventHandle TraceLog::AddTraceEventWithProcessId(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    unsigned long long id,
    int process_id,
    int num_args,
    const char* const* arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  base::TimeTicks now = TRACE_TIME_TICKS_NOW();
  return AddTraceEventWithThreadIdAndTimestamp(
      phase,
      category_group_enabled,
      name,
      scope,
      id,
      trace_event_internal::kNoId,  // bind_id
      process_id,
      now,
      num_args,
      arg_names,
      arg_types,
      arg_values,
      convertable_values,
      flags | TRACE_EVENT_FLAG_HAS_PROCESS_ID);
}

// Handle legacy calls to AddTraceEventWithThreadIdAndTimestamp
// with kNoId as bind_id
TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    unsigned long long id,
    int thread_id,
    const TimeTicks& timestamp,
    int num_args,
    const char* const* arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  return AddTraceEventWithThreadIdAndTimestamp(
      phase,
      category_group_enabled,
      name,
      scope,
      id,
      trace_event_internal::kNoId,  // bind_id
      thread_id,
      timestamp,
      num_args,
      arg_names,
      arg_types,
      arg_values,
      convertable_values,
      flags);
}

TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    unsigned long long id,
    unsigned long long bind_id,
    int thread_id,
    const TimeTicks& timestamp,
    int num_args,
    const char* const* arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  TraceEventHandle handle = {0, 0, 0};
  if (!*category_group_enabled)
    return handle;

  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return handle;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  DCHECK(name);
  DCHECK(!timestamp.is_null());

  if (flags & TRACE_EVENT_FLAG_MANGLE_ID) {
    if ((flags & TRACE_EVENT_FLAG_FLOW_IN) ||
        (flags & TRACE_EVENT_FLAG_FLOW_OUT))
      bind_id = MangleEventId(bind_id);
    id = MangleEventId(id);
  }

  TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);
  ThreadTicks thread_now = ThreadNow();

  ThreadLocalEventBuffer* thread_local_event_buffer = nullptr;
  if (*category_group_enabled & RECORDING_MODE) {
    // |thread_local_event_buffer_| can be null if the current thread doesn't
    // have a message loop or the message loop is blocked.
    InitializeThreadLocalEventBufferIfSupported();
    thread_local_event_buffer = thread_local_event_buffer_.Get();
  }

  // Check and update the current thread name only if the event is for the
  // current thread to avoid locks in most cases.
  if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
    const char* new_name =
        ThreadIdNameManager::GetInstance()->GetName(thread_id);
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this will
    // not detect a thread name change within the same char* buffer address: we
    // favor common case performance over corner case correctness.
    static auto* current_thread_name = new ThreadLocalPointer<const char>();
    if (new_name != current_thread_name->Get() && new_name && *new_name) {
      current_thread_name->Set(new_name);

      AutoLock thread_info_lock(thread_info_lock_);

      auto existing_name = thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
      } else {
        // This is a thread id that we've seen before, but potentially with a
        // new name.
        std::vector<StringPiece> existing_names = base::SplitStringPiece(
            existing_name->second, ",", base::KEEP_WHITESPACE,
            base::SPLIT_WANT_NONEMPTY);
        if (!ContainsValue(existing_names, new_name)) {
          if (!existing_names.empty())
            existing_name->second.push_back(',');
          existing_name->second.append(new_name);
        }
      }
    }
  }

#if defined(OS_WIN)
  // This is done sooner rather than later, to avoid creating the event and
  // acquiring the lock, which is not needed for ETW as it's already
  // thread-safe.
  if (*category_group_enabled & TraceCategory::ENABLED_FOR_ETW_EXPORT)
    TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
                                  num_args, arg_names, arg_types, arg_values,
                                  convertable_values);
#endif  // OS_WIN

  AddTraceEventOverrideCallback trace_event_override =
      reinterpret_cast<AddTraceEventOverrideCallback>(
          subtle::NoBarrier_Load(&trace_event_override_));
  if (trace_event_override) {
    TraceEvent new_trace_event;
    // If we have an override in place for events, rather than sending
    // them to the tracelog, we don't have a way of going back and updating
    // the duration of _COMPLETE events. Instead, we emit separate _BEGIN
    // and _END events.
    if (phase == TRACE_EVENT_PHASE_COMPLETE)
      phase = TRACE_EVENT_PHASE_BEGIN;

    new_trace_event.Initialize(thread_id, offset_event_timestamp, thread_now,
                               phase, category_group_enabled, name, scope, id,
                               bind_id, num_args, arg_names, arg_types,
                               arg_values, convertable_values, flags);

    trace_event_override(new_trace_event);
    return handle;
  }

  std::string console_message;
  std::unique_ptr<TraceEvent> filtered_trace_event;
  bool disabled_by_filters = false;
  if (*category_group_enabled & TraceCategory::ENABLED_FOR_FILTERING) {
    std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent);
    new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
                                phase, category_group_enabled, name, scope, id,
                                bind_id, num_args, arg_names, arg_types,
                                arg_values, convertable_values, flags);

    disabled_by_filters = true;
    ForEachCategoryFilter(
        category_group_enabled, [&new_trace_event, &disabled_by_filters](
                                    TraceEventFilter* trace_event_filter) {
          if (trace_event_filter->FilterTraceEvent(*new_trace_event))
            disabled_by_filters = false;
        });
    if (!disabled_by_filters)
      filtered_trace_event = std::move(new_trace_event);
  }

  // If enabled for recording, the event should be added only if one of the
  // filters accepts it, or the category is not enabled for filtering.
  if ((*category_group_enabled & TraceCategory::ENABLED_FOR_RECORDING) &&
      !disabled_by_filters) {
    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = nullptr;
    if (thread_local_event_buffer) {
      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
    } else {
      lock.EnsureAcquired();
      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
    }

    if (trace_event) {
      if (filtered_trace_event) {
        trace_event->MoveFrom(std::move(filtered_trace_event));
      } else {
        trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
                                phase, category_group_enabled, name, scope, id,
                                bind_id, num_args, arg_names, arg_types,
                                arg_values, convertable_values, flags);
      }

#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message = EventToConsoleMessage(
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
          timestamp, trace_event);
    }
  }

  if (!console_message.empty())
    LOG(ERROR) << console_message;

  return handle;
}

void TraceLog::AddMetadataEvent(
    const unsigned char* category_group_enabled,
    const char* name,
    int num_args,
    const char* const* arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  HEAP_PROFILER_SCOPED_IGNORE;
  std::unique_ptr<TraceEvent> trace_event(new TraceEvent);
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  ThreadTicks thread_now = ThreadNow();
  TimeTicks now = OffsetNow();
  AutoLock lock(lock_);
  trace_event->Initialize(
      thread_id, now, thread_now, TRACE_EVENT_PHASE_METADATA,
      category_group_enabled, name,
      trace_event_internal::kGlobalScope,  // scope
      trace_event_internal::kNoId,         // id
      trace_event_internal::kNoId,         // bind_id
      num_args, arg_names, arg_types, arg_values, convertable_values, flags);
  metadata_events_.push_back(std::move(trace_event));
}

// May be called when a COMPLETE event ends and the unfinished event has been
// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
std::string TraceLog::EventToConsoleMessage(unsigned char phase,
                                            const TimeTicks& timestamp,
                                            TraceEvent* trace_event) {
  HEAP_PROFILER_SCOPED_IGNORE;
  AutoLock thread_info_lock(thread_info_lock_);

  // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
  DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);

  TimeDelta duration;
  int thread_id =
      trace_event ? trace_event->thread_id() : PlatformThread::CurrentId();
  if (phase == TRACE_EVENT_PHASE_END) {
    duration = timestamp - thread_event_start_times_[thread_id].top();
    thread_event_start_times_[thread_id].pop();
  }

  std::string thread_name = thread_names_[thread_id];
  if (thread_colors_.find(thread_name) == thread_colors_.end()) {
    size_t next_color = (thread_colors_.size() % 6) + 1;
    thread_colors_[thread_name] = next_color;
  }

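  // |thread_colors_| values are in the range 1..6; the "\x1b[0;3%dm" escape
  // below maps them to ANSI foreground colors 31..36 so output from different
  // threads is easy to tell apart, and "\x1b[0;m" resets the color at the end.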
  std::ostringstream log;
  log << base::StringPrintf("%s: \x1b[0;3%dm", thread_name.c_str(),
                            static_cast<int>(thread_colors_[thread_name]));

  size_t depth = 0;
  auto it = thread_event_start_times_.find(thread_id);
  if (it != thread_event_start_times_.end())
    depth = it->second.size();

  for (size_t i = 0; i < depth; ++i)
    log << "| ";

  if (trace_event)
    trace_event->AppendPrettyPrinted(&log);
  if (phase == TRACE_EVENT_PHASE_END)
    log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());

  log << "\x1b[0;m";

  if (phase == TRACE_EVENT_PHASE_BEGIN)
    thread_event_start_times_[thread_id].push(timestamp);

  return log.str();
}

void TraceLog::EndFilteredEvent(const unsigned char* category_group_enabled,
                                const char* name,
                                TraceEventHandle handle) {
  const char* category_name = GetCategoryGroupName(category_group_enabled);
  ForEachCategoryFilter(
      category_group_enabled,
      [name, category_name](TraceEventFilter* trace_event_filter) {
        trace_event_filter->EndEvent(category_name, name);
      });
}

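// Convenience overload of UpdateTraceEventDurationExplicit() that stamps the
// end of the event with the current OffsetNow() and ThreadNow() values.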
void TraceLog::UpdateTraceEventDuration(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle) {
  char category_group_enabled_local = *category_group_enabled;
  if (!category_group_enabled_local)
    return;

  UpdateTraceEventDurationExplicit(category_group_enabled, name, handle,
                                   OffsetNow(), ThreadNow());
}

void TraceLog::UpdateTraceEventDurationExplicit(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle,
    const TimeTicks& now,
    const ThreadTicks& thread_now) {
  char category_group_enabled_local = *category_group_enabled;
  if (!category_group_enabled_local)
    return;

  // Avoid re-entrancy in AddTraceEvent. This may happen in the GPU process
  // when ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return;
  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

#if defined(OS_WIN)
  // Generate an ETW event that marks the end of a complete event.
  if (category_group_enabled_local & TraceCategory::ENABLED_FOR_ETW_EXPORT)
    TraceEventETWExport::AddCompleteEndEvent(name);
#endif  // OS_WIN

  std::string console_message;
  if (category_group_enabled_local & TraceCategory::ENABLED_FOR_RECORDING) {
    AddTraceEventOverrideCallback trace_event_override =
        reinterpret_cast<AddTraceEventOverrideCallback>(
            subtle::NoBarrier_Load(&trace_event_override_));

    // If we send events off to an override instead of the TraceBuffer,
    // we don't have a way of updating the prior event, so we'll emit a
    // separate _END event instead.
    if (trace_event_override) {
      TraceEvent new_trace_event;
      new_trace_event.Initialize(
          static_cast<int>(base::PlatformThread::CurrentId()), now, thread_now,
          TRACE_EVENT_PHASE_END, category_group_enabled, name,
          trace_event_internal::kGlobalScope,
          trace_event_internal::kNoId /* id */,
          trace_event_internal::kNoId /* bind_id */, 0, nullptr, nullptr,
          nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
      trace_event_override(new_trace_event);
      return;
    }

    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
    if (trace_event) {
      DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
      // TEMP(oysteine) to debug crbug.com/638744
      if (trace_event->duration().ToInternalValue() != -1) {
        DVLOG(1) << "TraceHandle: chunk_seq " << handle.chunk_seq
                 << ", chunk_index " << handle.chunk_index << ", event_index "
                 << handle.event_index;

        std::string serialized_event;
        trace_event->AppendAsJSON(&serialized_event, ArgumentFilterPredicate());
        DVLOG(1) << "TraceEvent: " << serialized_event;
        lock_.AssertAcquired();
      }

      trace_event->UpdateDuration(now, thread_now);
#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message =
          EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
    }
  }

  if (!console_message.empty())
    LOG(ERROR) << console_message;

  if (category_group_enabled_local & TraceCategory::ENABLED_FOR_FILTERING)
    EndFilteredEvent(category_group_enabled, name, handle);
}

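// XOR-ing with the per-process hash is self-inverse, i.e.
// MangleEventId(MangleEventId(id)) == id, and (barring hash collisions) it
// maps numerically equal IDs from different processes to distinct values.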
uint64_t TraceLog::MangleEventId(uint64_t id) {
  return id ^ process_id_hash_;
}

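// Writes the queued metadata events plus implicitly generated ones (process
// name, labels, sort indices, uptime, thread names, buffer-overflow time) to
// the trace buffer. |lock_| must already be held.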
void TraceLog::AddMetadataEventsWhileLocked() {
  lock_.AssertAcquired();

  // Move metadata added by |AddMetadataEvent| into the trace log.
  while (!metadata_events_.empty()) {
    TraceEvent* event = AddEventToThreadSharedChunkWhileLocked(nullptr, false);
    event->MoveFrom(std::move(metadata_events_.back()));
    metadata_events_.pop_back();
  }

#if !defined(OS_NACL)  // NaCl shouldn't expose the process id.
  InitializeMetadataEvent(
      AddEventToThreadSharedChunkWhileLocked(nullptr, false), 0, "num_cpus",
      "number", base::SysInfo::NumberOfProcessors());
#endif

  int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  if (process_sort_index_ != 0) {
    InitializeMetadataEvent(
        AddEventToThreadSharedChunkWhileLocked(nullptr, false),
        current_thread_id, "process_sort_index", "sort_index",
        process_sort_index_);
  }

  if (!process_name_.empty()) {
    InitializeMetadataEvent(
        AddEventToThreadSharedChunkWhileLocked(nullptr, false),
        current_thread_id, "process_name", "name", process_name_);
  }

  TimeDelta process_uptime = TRACE_TIME_NOW() - process_creation_time_;
  InitializeMetadataEvent(
      AddEventToThreadSharedChunkWhileLocked(nullptr, false), current_thread_id,
      "process_uptime_seconds", "uptime", process_uptime.InSeconds());

#if defined(OS_ANDROID)
  InitializeMetadataEvent(
      AddEventToThreadSharedChunkWhileLocked(nullptr, false), current_thread_id,
      "chrome_library_address", "start_address",
      base::StringPrintf("%p", &__executable_start));
#endif

  if (!process_labels_.empty()) {
    std::vector<base::StringPiece> labels;
    for (const auto& it : process_labels_)
      labels.push_back(it.second);
    InitializeMetadataEvent(
        AddEventToThreadSharedChunkWhileLocked(nullptr, false),
        current_thread_id, "process_labels", "labels",
        base::JoinString(labels, ","));
  }

  // Thread sort indices.
  for (const auto& it : thread_sort_indices_) {
    if (it.second == 0)
      continue;
    InitializeMetadataEvent(
        AddEventToThreadSharedChunkWhileLocked(nullptr, false), it.first,
        "thread_sort_index", "sort_index", it.second);
  }

  // Thread names.
  AutoLock thread_info_lock(thread_info_lock_);
  for (const auto& it : thread_names_) {
    if (it.second.empty())
      continue;
    InitializeMetadataEvent(
        AddEventToThreadSharedChunkWhileLocked(nullptr, false), it.first,
        "thread_name", "name", it.second);
  }

  // If the buffer filled up, add a metadata record reporting when it did.
  if (!buffer_limit_reached_timestamp_.is_null()) {
    InitializeMetadataEvent(
        AddEventToThreadSharedChunkWhileLocked(nullptr, false),
        current_thread_id, "trace_buffer_overflowed", "overflowed_at_ts",
        buffer_limit_reached_timestamp_);
  }
}

TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
  return GetEventByHandleInternal(handle, nullptr);
}

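// Looks up the event in the calling thread's local buffer first, which needs
// no locking; otherwise falls back to the thread-shared chunk and the main
// buffer, acquiring |lock| (if provided) before touching shared state.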
TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
                                               OptionalAutoLock* lock) {
  if (!handle.chunk_seq)
    return nullptr;

  DCHECK(handle.chunk_seq);
  DCHECK(handle.chunk_index <= TraceBufferChunk::kMaxChunkIndex);
  DCHECK(handle.event_index <= TraceBufferChunk::kTraceBufferChunkSize - 1);

  if (thread_local_event_buffer_.Get()) {
    TraceEvent* trace_event =
        thread_local_event_buffer_.Get()->GetEventByHandle(handle);
    if (trace_event)
      return trace_event;
  }

  // The event is no longer under the control of the thread-local buffer.
  // Try to get it from the main buffer, which requires the lock.
  if (lock)
    lock->EnsureAcquired();

  if (thread_shared_chunk_ &&
      handle.chunk_index == thread_shared_chunk_index_) {
    return handle.chunk_seq == thread_shared_chunk_->seq()
               ? thread_shared_chunk_->GetEventAt(handle.event_index)
               : nullptr;
  }

  return logged_events_->GetEventByHandle(handle);
}

void TraceLog::SetProcessID(int process_id) {
  process_id_ = process_id;
  // Create an FNV hash from the process ID for XORing.
  // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
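  // Note this is a single FNV-1a-style round (XOR, then multiply) applied to
  // the whole pid at once rather than byte-by-byte; only cheap per-process
  // scrambling is needed for MangleEventId(), not a full-strength hash.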
  const unsigned long long kOffsetBasis = 14695981039346656037ull;
  const unsigned long long kFnvPrime = 1099511628211ull;
  const unsigned long long pid = static_cast<unsigned long long>(process_id_);
  process_id_hash_ = (kOffsetBasis ^ pid) * kFnvPrime;
}

void TraceLog::SetProcessSortIndex(int sort_index) {
  AutoLock lock(lock_);
  process_sort_index_ = sort_index;
}

void TraceLog::UpdateProcessLabel(int label_id,
                                  const std::string& current_label) {
  if (!current_label.length())
    return RemoveProcessLabel(label_id);

  AutoLock lock(lock_);
  process_labels_[label_id] = current_label;
}

void TraceLog::RemoveProcessLabel(int label_id) {
  AutoLock lock(lock_);
  process_labels_.erase(label_id);
}

void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
  AutoLock lock(lock_);
  thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
}

void TraceLog::SetTimeOffset(TimeDelta offset) {
  time_offset_ = offset;
}

size_t TraceLog::GetObserverCountForTest() const {
  return enabled_state_observer_list_.size();
}

void TraceLog::SetCurrentThreadBlocksMessageLoop() {
  thread_blocks_message_loop_.Set(true);
  // This will flush the thread local buffer.
  delete thread_local_event_buffer_.Get();
}

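// Options are checked in priority order: continuous recording and
// echo-to-console use ring buffers, record-as-much-as-possible uses the big
// vector buffer, and the default is the regular vector buffer.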
TraceBuffer* TraceLog::CreateTraceBuffer() {
  HEAP_PROFILER_SCOPED_IGNORE;
  InternalTraceOptions options = trace_options();
  if (options & kInternalRecordContinuously) {
    return TraceBuffer::CreateTraceBufferRingBuffer(
        kTraceEventRingBufferChunks);
  }
  if (options & kInternalEchoToConsole) {
    return TraceBuffer::CreateTraceBufferRingBuffer(
        kEchoToConsoleTraceEventBufferChunks);
  }
  if (options & kInternalRecordAsMuchAsPossible) {
    return TraceBuffer::CreateTraceBufferVectorOfSize(
        kTraceEventVectorBigBufferChunks);
  }
  return TraceBuffer::CreateTraceBufferVectorOfSize(
      kTraceEventVectorBufferChunks);
}

#if defined(OS_WIN)
void TraceLog::UpdateETWCategoryGroupEnabledFlags() {
  // Go through each category and set/clear the ETW bit depending on whether
  // the category is enabled.
  for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
    if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
            category.name())) {
      category.set_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
    } else {
      category.clear_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
    }
  }
}
#endif  // defined(OS_WIN)

void TraceLog::SetTraceBufferForTesting(
    std::unique_ptr<TraceBuffer> trace_buffer) {
  AutoLock lock(lock_);
  logged_events_ = std::move(trace_buffer);
}

void ConvertableToTraceFormat::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  overhead->Add(TraceEventMemoryOverhead::kConvertableToTraceFormat,
                sizeof(*this));
}

void TraceLog::AddAsyncEnabledStateObserver(
    WeakPtr<AsyncEnabledStateObserver> listener) {
  AutoLock lock(lock_);
  async_observers_.insert(
      std::make_pair(listener.get(), RegisteredAsyncObserver(listener)));
}

void TraceLog::RemoveAsyncEnabledStateObserver(
    AsyncEnabledStateObserver* listener) {
  AutoLock lock(lock_);
  async_observers_.erase(listener);
}

bool TraceLog::HasAsyncEnabledStateObserver(
    AsyncEnabledStateObserver* listener) const {
  AutoLock lock(lock_);
  return ContainsKey(async_observers_, listener);
}

}  // namespace trace_event
}  // namespace base

namespace trace_event_internal {

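// RAII helper: the constructor emits a TRACE_EVENT_PHASE_COMPLETE event
// stamped with the current time, and the destructor closes it by updating
// the stored event's duration.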
ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
    const char* category_group,
    const char* name) {
  // The single atom works because for now the category_group can only be
  // "gpu".
  DCHECK_EQ(strcmp(category_group, "gpu"), 0);
  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
      category_group, atomic, category_group_enabled_);
  name_ = name;
  if (*category_group_enabled_) {
    event_handle_ =
        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
            TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
            trace_event_internal::kGlobalScope,                   // scope
            trace_event_internal::kNoId,                          // id
            static_cast<int>(base::PlatformThread::CurrentId()),  // thread_id
            TRACE_TIME_TICKS_NOW(), trace_event_internal::kZeroNumArgs, nullptr,
            nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
  }
}

ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
                                                event_handle_);
  }
}

}  // namespace trace_event_internal