// Copyright (c) 2014 Google Inc.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This header file defines the set of trace_event macros without specifying
// how the events actually get collected and stored. If you need to expose trace
// events to some other universe, you can copy-and-paste this file as well as
// trace_event.h, modifying the macros contained there as necessary for the
// target platform. The end result is that multiple libraries can funnel events
// through to a shared trace event collector.

// Trace events are for tracking application performance and resource usage.
// Macros are provided to track:
//    Begin and end of function calls
//    Counters
//
// Events are issued against categories. Whereas LOG's
// categories are statically defined, TRACE categories are created
// implicitly with a string. For example:
//   TRACE_EVENT_INSTANT0("MY_SUBSYSTEM", "SomeImportantEvent",
//                        TRACE_EVENT_SCOPE_THREAD)
//
// It is often the case that one trace may belong in multiple categories at the
// same time. The first argument to the trace can be a comma-separated list of
// categories, forming a category group, like:
//
// TRACE_EVENT_INSTANT0("input,views", "OnMouseOver", TRACE_EVENT_SCOPE_THREAD)
//
// We can enable/disable tracing of OnMouseOver by enabling/disabling either
// category.
//
// Events can be INSTANT, or can be pairs of BEGIN and END in the same scope:
//   TRACE_EVENT_BEGIN0("MY_SUBSYSTEM", "SomethingCostly")
//   doSomethingCostly()
//   TRACE_EVENT_END0("MY_SUBSYSTEM", "SomethingCostly")
// Note: our tools can't always determine the correct BEGIN/END pairs unless
// these are used in the same scope. Use ASYNC_BEGIN/ASYNC_END macros if you
// need them to be in separate scopes.
//
// A common use case is to trace entire function scopes. This
// issues a trace BEGIN and END automatically:
//   void doSomethingCostly() {
//     TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
//     ...
//   }
//
// Additional parameters can be associated with an event:
//   void doSomethingCostly2(int howMuch) {
//     TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly",
//         "howMuch", howMuch);
//     ...
//   }
//
// The trace system will automatically add to this information the
// current process id, thread id, and a timestamp in microseconds.
//
// To trace an asynchronous procedure such as an IPC send/receive, use
// ASYNC_BEGIN and ASYNC_END:
//   [single threaded sender code]
//     static int send_count = 0;
//     ++send_count;
//     TRACE_EVENT_ASYNC_BEGIN0("ipc", "message", send_count);
//     Send(new MyMessage(send_count));
//   [receive code]
//     void OnMyMessage(send_count) {
//       TRACE_EVENT_ASYNC_END0("ipc", "message", send_count);
//     }
// The third parameter is a unique ID to match ASYNC_BEGIN/ASYNC_END pairs.
// ASYNC_BEGIN and ASYNC_END can occur on any thread of any traced process.
// Pointers can be used for the ID parameter, and they will be mangled
// internally so that the same pointer on two different processes will not
// match. For example:
//   class MyTracedClass {
//    public:
//     MyTracedClass() {
//       TRACE_EVENT_ASYNC_BEGIN0("category", "MyTracedClass", this);
//     }
//     ~MyTracedClass() {
//       TRACE_EVENT_ASYNC_END0("category", "MyTracedClass", this);
//     }
//   }
//
// Trace event also supports counters, which are a way to track a quantity
// as it varies over time. Counters are created with the following macro:
//   TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue);
//
// Counters are process-specific. The macro itself can be issued from any
// thread, however.
//
// Sometimes, you want to track two counters at once. You can do this with two
// counter macros:
//   TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]);
//   TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]);
// Or you can do it with a combined macro:
//   TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter",
//       "bytesPinned", g_myCounterValue[0],
//       "bytesAllocated", g_myCounterValue[1]);
// This indicates to the tracing UI that these counters should be displayed
// in a single graph, as a summed area chart.
//
// Since counters are in a global namespace, you may want to disambiguate with a
// unique ID, by using the TRACE_COUNTER_ID* variations.
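// For example, an ID-keyed counter could use an object's address to keep
// per-instance counters separate (illustrative sketch; MyResource and
// bytes_used_ are hypothetical names, not part of this header):
//   TRACE_COUNTER_ID1("MY_SUBSYSTEM", "bytesUsed", this, bytes_used_);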
//
// By default, trace collection is compiled in, but turned off at runtime.
// Collecting trace data is the responsibility of the embedding
// application. In Chrome's case, navigating to about:tracing will turn on
// tracing and display data collected across all active processes.
//
//
// Memory scoping note:
// Tracing copies the pointers, not the string content, of the strings passed
// in for category_group, name, and arg_names.  Thus, the following code will
// cause problems:
//     char* str = strdup("importantName");
//     TRACE_EVENT_INSTANT0("SUBSYSTEM", str, TRACE_EVENT_SCOPE_THREAD);  // BAD!
//     free(str);                   // Trace system now has dangling pointer
//
// To avoid this issue with the |name| and |arg_name| parameters, use the
// TRACE_EVENT_COPY_XXX overloads of the macros at additional runtime overhead.
// Notes: The category must always be in a long-lived char* (i.e. static const).
//        The |arg_values|, when used, are always deep copied with the _COPY
//        macros.
//
// When are string argument values copied:
// const char* arg_values are only referenced by default:
//     TRACE_EVENT1("category", "name",
//                  "arg1", "literal string is only referenced");
// Use TRACE_STR_COPY to force copying of a const char*:
//     TRACE_EVENT1("category", "name",
//                  "arg1", TRACE_STR_COPY("string will be copied"));
// std::string arg_values are always copied:
//     TRACE_EVENT1("category", "name",
//                  "arg1", std::string("string will be copied"));
//
//
// Thread Safety:
// A thread safe singleton and mutex are used for thread safety. Category
// enabled flags are used to limit the performance impact when the system
// is not enabled.
//
// TRACE_EVENT macros first cache a pointer to a category. The categories are
// statically allocated and safe at all times, even after exit. Fetching a
// category is protected by the TraceLog::lock_. Multiple threads initializing
// the static variable is safe, as they will be serialized by the lock and
// multiple calls will return the same pointer to the category.
//
// Then the category_group_enabled flag is checked. This is an unsigned char,
// and not intended to be multithread safe. It optimizes access to AddTraceEvent
// which is threadsafe internally via TraceLog::lock_. The enabled flag may
// cause some threads to incorrectly call or skip calling AddTraceEvent near
// the time of the system being enabled or disabled. This is acceptable as
// we tolerate some data loss while the system is being enabled/disabled and
// because AddTraceEvent is threadsafe internally and checks the enabled state
// again under lock.
//
// Without the use of these static category pointers and enabled flags all
// trace points would carry a significant performance cost of acquiring a lock
// and resolving the category.
#ifndef SkTraceEvent_DEFINED
#define SkTraceEvent_DEFINED

#include "SkAtomics.h"
#include "SkEventTracer.h"

// By default, const char* argument values are assumed to have long-lived scope
// and will not be copied. Use this macro to force a const char* to be copied.
#define TRACE_STR_COPY(str) \
    skia::tracing_internals::TraceStringWithCopy(str)

// By default, uint64 ID argument values are not mangled with the Process ID in
// TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling.
#define TRACE_ID_MANGLE(id) \
    skia::tracing_internals::TraceID::ForceMangle(id)

// By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC
// macros. Use this macro to prevent Process ID mangling.
#define TRACE_ID_DONT_MANGLE(id) \
    skia::tracing_internals::TraceID::DontMangle(id)

// Records a pair of begin and end events called "name" for the current
// scope, with 0, 1 or 2 associated arguments. If the category is not
// enabled, then this does nothing.
// - category and name strings must have application lifetime (statics or
//   literals). They may not include " chars.
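// For example, a scoped event with two arguments (illustrative sketch; howMuch
// is a hypothetical local variable):
//   TRACE_EVENT2("MY_SUBSYSTEM", "doSomethingCostly",
//                "howMuch", howMuch, "reason", "prefetch");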
#define TRACE_EVENT0(category_group, name) \
    INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name)
#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
    INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val)
#define TRACE_EVENT2( \
    category_group, name, arg1_name, arg1_val, arg2_name, arg2_val) \
  INTERNAL_TRACE_EVENT_ADD_SCOPED( \
      category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)

// Records events like TRACE_EVENT2 but uses |memory_tag| for memory tracing.
// Use this where |name| is too generic to accurately aggregate allocations.
#define TRACE_EVENT_WITH_MEMORY_TAG2( \
    category, name, memory_tag, arg1_name, arg1_val, arg2_name, arg2_val) \
  INTERNAL_TRACE_EVENT_ADD_SCOPED( \
      category, name, arg1_name, arg1_val, arg2_name, arg2_val)

// UNSHIPPED_TRACE_EVENT* are like TRACE_EVENT* except that they are not
// included in official builds.

#if OFFICIAL_BUILD
#undef TRACING_IS_OFFICIAL_BUILD
#define TRACING_IS_OFFICIAL_BUILD 1
#elif !defined(TRACING_IS_OFFICIAL_BUILD)
#define TRACING_IS_OFFICIAL_BUILD 0
#endif

#if TRACING_IS_OFFICIAL_BUILD
#define UNSHIPPED_TRACE_EVENT0(category_group, name) (void)0
#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
    (void)0
#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
                               arg2_name, arg2_val) (void)0
#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) (void)0
#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, \
                                       arg1_name, arg1_val) (void)0
#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, \
                                       arg1_name, arg1_val, \
                                       arg2_name, arg2_val) (void)0
#else
#define UNSHIPPED_TRACE_EVENT0(category_group, name) \
    TRACE_EVENT0(category_group, name)
#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
    TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
                               arg2_name, arg2_val) \
    TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) \
    TRACE_EVENT_INSTANT0(category_group, name, scope)
#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, \
                                       arg1_name, arg1_val) \
    TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val)
#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, \
                                       arg1_name, arg1_val, \
                                       arg2_name, arg2_val) \
    TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
                         arg2_name, arg2_val)
#endif

// Records a single event called "name" immediately, with 0, 1 or 2
// associated arguments. If the category is not enabled, then this
// does nothing.
// - category and name strings must have application lifetime (statics or
//   literals). They may not include " chars.
#define TRACE_EVENT_INSTANT0(category_group, name, scope) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
        category_group, name, TRACE_EVENT_FLAG_NONE | scope)
#define TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
        category_group, name, TRACE_EVENT_FLAG_NONE | scope, \
        arg1_name, arg1_val)
#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
                             arg2_name, arg2_val) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
        category_group, name, TRACE_EVENT_FLAG_NONE | scope, \
        arg1_name, arg1_val, arg2_name, arg2_val)
#define TRACE_EVENT_COPY_INSTANT0(category_group, name, scope) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
        category_group, name, TRACE_EVENT_FLAG_COPY | scope)
#define TRACE_EVENT_COPY_INSTANT1(category_group, name, scope, \
                                  arg1_name, arg1_val) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
        category_group, name, TRACE_EVENT_FLAG_COPY | scope, arg1_name, \
        arg1_val)
#define TRACE_EVENT_COPY_INSTANT2(category_group, name, scope, \
                                  arg1_name, arg1_val, \
                                  arg2_name, arg2_val) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
        category_group, name, TRACE_EVENT_FLAG_COPY | scope, \
        arg1_name, arg1_val, arg2_name, arg2_val)

// Sets the current sample state to the given category and name (both must be
// constant strings). These states are intended for a sampling profiler.
// Implementation note: we store category and name together because we don't
// want the inconsistency/expense of storing two pointers.
// |thread_bucket| is [0..2] and is used to statically isolate samples in one
// thread from others.
#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET( \
    bucket_number, category, name)                 \
        skia::tracing_internals::                     \
        TraceEventSamplingStateScope<bucket_number>::Set(category "\0" name)

// Returns the current sampling state of the given bucket.
#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
    skia::tracing_internals::TraceEventSamplingStateScope<bucket_number>::Current()

// Creates a scope of a sampling state of the given bucket.
//
// {  // The sampling state is set within this scope.
//    TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, "category", "name");
//    ...;
// }
#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(                   \
    bucket_number, category, name)                                      \
    skia::tracing_internals::TraceEventSamplingStateScope<bucket_number>   \
        traceEventSamplingScope(category "\0" name);

// Syntactic sugar for sampling tracing in the main thread.
#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
    TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
#define TRACE_EVENT_GET_SAMPLING_STATE() \
    TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
    TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
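// For example, the main thread could advertise what it is doing so a sampling
// profiler can attribute samples (illustrative sketch; the category/name pairs
// are hypothetical):
//   TRACE_EVENT_SET_SAMPLING_STATE("rendering", "PaintLayer");
//   ...  // samples taken here are attributed to rendering/PaintLayer
//   TRACE_EVENT_SET_SAMPLING_STATE("idle", "MessageLoop");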


// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
// associated arguments. If the category is not enabled, then this
// does nothing.
// - category and name strings must have application lifetime (statics or
//   literals). They may not include " chars.
#define TRACE_EVENT_BEGIN0(category_group, name) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
        category_group, name, TRACE_EVENT_FLAG_NONE)
#define TRACE_EVENT_BEGIN1(category_group, name, arg1_name, arg1_val) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
        category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
#define TRACE_EVENT_BEGIN2(category_group, name, arg1_name, arg1_val, \
        arg2_name, arg2_val) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
        category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
        arg2_name, arg2_val)
#define TRACE_EVENT_COPY_BEGIN0(category_group, name) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
        category_group, name, TRACE_EVENT_FLAG_COPY)
#define TRACE_EVENT_COPY_BEGIN1(category_group, name, arg1_name, arg1_val) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
        category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
#define TRACE_EVENT_COPY_BEGIN2(category_group, name, arg1_name, arg1_val, \
        arg2_name, arg2_val) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
        category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
        arg2_name, arg2_val)

// Similar to TRACE_EVENT_BEGINx but with a custom |timestamp| provided.
// - |id| is used to match the _BEGIN event with the _END event.
//   Events are considered to match if their category_group, name and id values
//   all match. |id| must either be a pointer or an integer value up to 64 bits.
//   If it's a pointer, the bits will be xored with a hash of the process ID so
//   that the same pointer on two different processes will not collide.
#define TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(category_group, \
        name, id, thread_id, timestamp) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
        TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
        timestamp, TRACE_EVENT_FLAG_NONE)
#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0( \
        category_group, name, id, thread_id, timestamp) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
        TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
        timestamp, TRACE_EVENT_FLAG_COPY)

// Records a single END event for "name" immediately. If the category
// is not enabled, then this does nothing.
// - category and name strings must have application lifetime (statics or
//   literals). They may not include " chars.
#define TRACE_EVENT_END0(category_group, name) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
        category_group, name, TRACE_EVENT_FLAG_NONE)
#define TRACE_EVENT_END1(category_group, name, arg1_name, arg1_val) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
        category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
#define TRACE_EVENT_END2(category_group, name, arg1_name, arg1_val, \
        arg2_name, arg2_val) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
        category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
        arg2_name, arg2_val)
#define TRACE_EVENT_COPY_END0(category_group, name) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
        category_group, name, TRACE_EVENT_FLAG_COPY)
#define TRACE_EVENT_COPY_END1(category_group, name, arg1_name, arg1_val) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
        category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
#define TRACE_EVENT_COPY_END2(category_group, name, arg1_name, arg1_val, \
        arg2_name, arg2_val) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
        category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
        arg2_name, arg2_val)

// Similar to TRACE_EVENT_ENDx but with a custom |timestamp| provided.
// - |id| is used to match the _BEGIN event with the _END event.
//   Events are considered to match if their category_group, name and id values
//   all match. |id| must either be a pointer or an integer value up to 64 bits.
//   If it's a pointer, the bits will be xored with a hash of the process ID so
//   that the same pointer on two different processes will not collide.
#define TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(category_group, \
        name, id, thread_id, timestamp) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
        TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
        timestamp, TRACE_EVENT_FLAG_NONE)
#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0( \
        category_group, name, id, thread_id, timestamp) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
        TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
        timestamp, TRACE_EVENT_FLAG_COPY)

// Records the value of a counter called "name" immediately. Value
// must be representable as a 32 bit integer.
// - category and name strings must have application lifetime (statics or
//   literals). They may not include " chars.
#define TRACE_COUNTER1(category_group, name, value) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
        category_group, name, TRACE_EVENT_FLAG_NONE, \
        "value", static_cast<int>(value))
#define TRACE_COPY_COUNTER1(category_group, name, value) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
        category_group, name, TRACE_EVENT_FLAG_COPY, \
        "value", static_cast<int>(value))

// Records the values of a multi-parted counter called "name" immediately.
// The UI will treat value1 and value2 as parts of a whole, displaying their
// values as a stacked-bar chart.
// - category and name strings must have application lifetime (statics or
//   literals). They may not include " chars.
#define TRACE_COUNTER2(category_group, name, value1_name, value1_val, \
        value2_name, value2_val) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
        category_group, name, TRACE_EVENT_FLAG_NONE, \
        value1_name, static_cast<int>(value1_val), \
        value2_name, static_cast<int>(value2_val))
#define TRACE_COPY_COUNTER2(category_group, name, value1_name, value1_val, \
        value2_name, value2_val) \
    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
        category_group, name, TRACE_EVENT_FLAG_COPY, \
        value1_name, static_cast<int>(value1_val), \
        value2_name, static_cast<int>(value2_val))

// Records the value of a counter called "name" immediately. Value
// must be representable as a 32 bit integer.
// - category and name strings must have application lifetime (statics or
//   literals). They may not include " chars.
// - |id| is used to disambiguate counters with the same name. It must either
//   be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
//   will be xored with a hash of the process ID so that the same pointer on
//   two different processes will not collide.
#define TRACE_COUNTER_ID1(category_group, name, id, value) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE, \
        "value", static_cast<int>(value))
#define TRACE_COPY_COUNTER_ID1(category_group, name, id, value) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
        "value", static_cast<int>(value))

// Records the values of a multi-parted counter called "name" immediately.
// The UI will treat value1 and value2 as parts of a whole, displaying their
// values as a stacked-bar chart.
// - category and name strings must have application lifetime (statics or
//   literals). They may not include " chars.
// - |id| is used to disambiguate counters with the same name. It must either
//   be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
//   will be xored with a hash of the process ID so that the same pointer on
//   two different processes will not collide.
#define TRACE_COUNTER_ID2(category_group, name, id, value1_name, value1_val, \
        value2_name, value2_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE, \
        value1_name, static_cast<int>(value1_val), \
        value2_name, static_cast<int>(value2_val))
#define TRACE_COPY_COUNTER_ID2(category_group, name, id, value1_name, \
        value1_val, value2_name, value2_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
        value1_name, static_cast<int>(value1_val), \
        value2_name, static_cast<int>(value2_val))


// Records a single ASYNC_BEGIN event called "name" immediately, with 0, 1 or 2
// associated arguments. If the category is not enabled, then this
// does nothing.
// - category and name strings must have application lifetime (statics or
//   literals). They may not include " chars.
// - |id| is used to match the ASYNC_BEGIN event with the ASYNC_END event. ASYNC
//   events are considered to match if their category_group, name and id values
//   all match. |id| must either be a pointer or an integer value up to 64 bits.
//   If it's a pointer, the bits will be xored with a hash of the process ID so
//   that the same pointer on two different processes will not collide.
//
// An asynchronous operation can consist of multiple phases. The first phase is
// defined by the ASYNC_BEGIN calls. Additional phases can be defined using the
// ASYNC_STEP_INTO or ASYNC_STEP_PAST macros. The ASYNC_STEP_INTO macro will
// annotate the block following the call. The ASYNC_STEP_PAST macro will
// annotate the block prior to the call. Note that any particular event must use
// only STEP_INTO or STEP_PAST macros; they cannot be mixed and matched. When
// the operation completes, call ASYNC_END.
//
// An ASYNC trace typically occurs on a single thread (if not, it will only be
// drawn on the thread defined in the ASYNC_BEGIN event), but all events in that
// operation must use the same |name| and |id|. Each step can have its own
// args.
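// For example, a multi-phase async operation might be traced like this
// (illustrative sketch; request_id is a hypothetical unique identifier):
//   TRACE_EVENT_ASYNC_BEGIN0("net", "Request", request_id);
//   TRACE_EVENT_ASYNC_STEP_INTO0("net", "Request", request_id, "Sending");
//   ...                      // the "Sending" phase
//   TRACE_EVENT_ASYNC_STEP_INTO0("net", "Request", request_id, "Waiting");
//   ...                      // the "Waiting" phase
//   TRACE_EVENT_ASYNC_END0("net", "Request", request_id);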
#define TRACE_EVENT_ASYNC_BEGIN0(category_group, name, id) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE)
#define TRACE_EVENT_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
        arg1_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
#define TRACE_EVENT_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
        arg1_val, arg2_name, arg2_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE, \
        arg1_name, arg1_val, arg2_name, arg2_val)
#define TRACE_EVENT_COPY_ASYNC_BEGIN0(category_group, name, id) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
        category_group, name, id, TRACE_EVENT_FLAG_COPY)
#define TRACE_EVENT_COPY_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
        arg1_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
        arg1_name, arg1_val)
#define TRACE_EVENT_COPY_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
        arg1_val, arg2_name, arg2_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
        arg1_name, arg1_val, arg2_name, arg2_val)

// Records a single ASYNC_STEP_INTO event for |step| immediately. If the
// category is not enabled, then this does nothing. The |name| and |id| must
// match the ASYNC_BEGIN event above. The |step| param identifies this step
// within the async event. This should be called at the beginning of the next
// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
// ASYNC_STEP_PAST events.
#define TRACE_EVENT_ASYNC_STEP_INTO0(category_group, name, id, step) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step)
#define TRACE_EVENT_ASYNC_STEP_INTO1(category_group, name, id, step, \
                                     arg1_name, arg1_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step, \
        arg1_name, arg1_val)

// Records a single ASYNC_STEP_PAST event for |step| immediately. If the
// category is not enabled, then this does nothing. The |name| and |id| must
// match the ASYNC_BEGIN event above. The |step| param identifies this step
// within the async event. This should be called at the beginning of the next
// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
// ASYNC_STEP_INTO events.
#define TRACE_EVENT_ASYNC_STEP_PAST0(category_group, name, id, step) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step)
#define TRACE_EVENT_ASYNC_STEP_PAST1(category_group, name, id, step, \
                                     arg1_name, arg1_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step, \
        arg1_name, arg1_val)

// Records a single ASYNC_END event for "name" immediately. If the category
// is not enabled, then this does nothing.
#define TRACE_EVENT_ASYNC_END0(category_group, name, id) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE)
#define TRACE_EVENT_ASYNC_END1(category_group, name, id, arg1_name, arg1_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
#define TRACE_EVENT_ASYNC_END2(category_group, name, id, arg1_name, arg1_val, \
        arg2_name, arg2_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE, \
        arg1_name, arg1_val, arg2_name, arg2_val)
#define TRACE_EVENT_COPY_ASYNC_END0(category_group, name, id) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
        category_group, name, id, TRACE_EVENT_FLAG_COPY)
#define TRACE_EVENT_COPY_ASYNC_END1(category_group, name, id, arg1_name, \
        arg1_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
        arg1_name, arg1_val)
#define TRACE_EVENT_COPY_ASYNC_END2(category_group, name, id, arg1_name, \
        arg1_val, arg2_name, arg2_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
        arg1_name, arg1_val, arg2_name, arg2_val)


// Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2
// associated arguments. If the category is not enabled, then this
// does nothing.
// - category and name strings must have application lifetime (statics or
//   literals). They may not include " chars.
// - |id| is used to match the FLOW_BEGIN event with the FLOW_END event. FLOW
//   events are considered to match if their category_group, name and id values
//   all match. |id| must either be a pointer or an integer value up to 64 bits.
//   If it's a pointer, the bits will be xored with a hash of the process ID so
//   that the same pointer on two different processes will not collide.
// FLOW events are different from ASYNC events in how they are drawn by the
// tracing UI. A FLOW defines asynchronous data flow, such as posting a task
// (FLOW_BEGIN) and later executing that task (FLOW_END). Expect FLOWs to be
// drawn as lines or arrows from FLOW_BEGIN scopes to FLOW_END scopes. Similar
// to ASYNC, a FLOW can consist of multiple phases. The first phase is defined
// by the FLOW_BEGIN calls. Additional phases can be defined using the FLOW_STEP
// macros. When the operation completes, call FLOW_END. A flow operation can
// span threads and processes, but all events in that operation must use the
// same |name| and |id|. Each event can have its own args.
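// For example, posting a task and later running it could be linked like this
// (illustrative sketch; task_id is a hypothetical unique identifier shared by
// both sides):
//   [posting side]
//     TRACE_EVENT_FLOW_BEGIN0("toplevel", "PostTask", task_id);
//   [executing side]
//     TRACE_EVENT_FLOW_END0("toplevel", "PostTask", task_id);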
#define TRACE_EVENT_FLOW_BEGIN0(category_group, name, id) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE)
#define TRACE_EVENT_FLOW_BEGIN1(category_group, name, id, arg1_name, arg1_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
#define TRACE_EVENT_FLOW_BEGIN2(category_group, name, id, arg1_name, arg1_val, \
        arg2_name, arg2_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE, \
        arg1_name, arg1_val, arg2_name, arg2_val)
#define TRACE_EVENT_COPY_FLOW_BEGIN0(category_group, name, id) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
        category_group, name, id, TRACE_EVENT_FLAG_COPY)
#define TRACE_EVENT_COPY_FLOW_BEGIN1(category_group, name, id, arg1_name, \
        arg1_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
        arg1_name, arg1_val)
#define TRACE_EVENT_COPY_FLOW_BEGIN2(category_group, name, id, arg1_name, \
        arg1_val, arg2_name, arg2_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
        arg1_name, arg1_val, arg2_name, arg2_val)

// Records a single FLOW_STEP event for |step| immediately. If the category
// is not enabled, then this does nothing. The |name| and |id| must match the
// FLOW_BEGIN event above. The |step| param identifies this step within the
// flow event. This should be called at the beginning of the next phase of an
// asynchronous operation.
#define TRACE_EVENT_FLOW_STEP0(category_group, name, id, step) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step)
#define TRACE_EVENT_FLOW_STEP1(category_group, name, id, step, \
        arg1_name, arg1_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step, \
        arg1_name, arg1_val)
#define TRACE_EVENT_COPY_FLOW_STEP0(category_group, name, id, step) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
        category_group, name, id, TRACE_EVENT_FLAG_COPY, "step", step)
#define TRACE_EVENT_COPY_FLOW_STEP1(category_group, name, id, step, \
        arg1_name, arg1_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
        category_group, name, id, TRACE_EVENT_FLAG_COPY, "step", step, \
        arg1_name, arg1_val)

// Records a single FLOW_END event for "name" immediately. If the category
// is not enabled, then this does nothing.
#define TRACE_EVENT_FLOW_END0(category_group, name, id) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE)
#define TRACE_EVENT_FLOW_END1(category_group, name, id, arg1_name, arg1_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
#define TRACE_EVENT_FLOW_END2(category_group, name, id, arg1_name, arg1_val, \
        arg2_name, arg2_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
        category_group, name, id, TRACE_EVENT_FLAG_NONE, \
        arg1_name, arg1_val, arg2_name, arg2_val)
#define TRACE_EVENT_COPY_FLOW_END0(category_group, name, id) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
        category_group, name, id, TRACE_EVENT_FLAG_COPY)
#define TRACE_EVENT_COPY_FLOW_END1(category_group, name, id, arg1_name, \
        arg1_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
        arg1_name, arg1_val)
#define TRACE_EVENT_COPY_FLOW_END2(category_group, name, id, arg1_name, \
        arg1_val, arg2_name, arg2_val) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
        arg1_name, arg1_val, arg2_name, arg2_val)

// Macros to track the lifetime and value of arbitrary client objects.
// See also TraceTrackableObject.
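// For example, an object's lifetime and state could be tracked like this
// (illustrative sketch; MyObject and DescribeState() are hypothetical):
//   TRACE_EVENT_OBJECT_CREATED_WITH_ID("category", "MyObject", this);
//   TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID("category", "MyObject", this,
//                                       DescribeState());
//   TRACE_EVENT_OBJECT_DELETED_WITH_ID("category", "MyObject", this);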
#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_CREATE_OBJECT, \
        category_group, name, TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)

#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, snapshot) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, \
        category_group, name, TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE,\
        "snapshot", snapshot)

#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_DELETE_OBJECT, \
        category_group, name, TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)

#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
    *INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
        (SkEventTracer::kEnabledForRecording_CategoryGroupEnabledFlags | \
         SkEventTracer::kEnabledForEventCallback_CategoryGroupEnabledFlags)

// Macro to efficiently determine if a given category group is enabled.
#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
    do { \
      INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
      if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
        *ret = true; \
      } else { \
        *ret = false; \
      } \
    } while (0)
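// For example, expensive argument computation can be skipped when the category
// is off (illustrative sketch; ComputeExpensiveSummary() is hypothetical):
//   bool enabled;
//   TRACE_EVENT_CATEGORY_GROUP_ENABLED("MY_SUBSYSTEM", &enabled);
//   if (enabled) {
//     TRACE_EVENT_INSTANT1("MY_SUBSYSTEM", "Summary", TRACE_EVENT_SCOPE_THREAD,
//                          "summary", ComputeExpensiveSummary());
//   }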

// Macro to efficiently determine, through polling, if a new trace has begun.
#define TRACE_EVENT_IS_NEW_TRACE(ret) \
    do { \
      static int INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = 0; \
      int num_traces_recorded = TRACE_EVENT_API_GET_NUM_TRACES_RECORDED(); \
      if (num_traces_recorded != -1 && \
          num_traces_recorded != \
          INTERNAL_TRACE_EVENT_UID(lastRecordingNumber)) { \
        INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = \
            num_traces_recorded; \
        *ret = true; \
      } else { \
        *ret = false; \
      } \
    } while (0)
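// For example, per-trace setup work can be re-run whenever a new trace starts
// (illustrative sketch; EmitProcessMetadata() is hypothetical):
//   bool is_new_trace;
//   TRACE_EVENT_IS_NEW_TRACE(&is_new_trace);
//   if (is_new_trace) {
//     EmitProcessMetadata();
//   }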

////////////////////////////////////////////////////////////////////////////////
// Implementation specific tracing API definitions.

// Get a pointer to the enabled state of the given trace category. Only
// long-lived literal strings should be given as the category group. The
// returned pointer can be held permanently in a local static for example. If
// the unsigned char is non-zero, tracing is enabled. If tracing is enabled,
// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
// between the load of the tracing state and the call to
// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
// for best performance when tracing is disabled.
// const uint8_t*
//     TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group)
#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \
    SkEventTracer::GetInstance()->getCategoryGroupEnabled

// Get the number of times traces have been recorded. This is used to implement
// the TRACE_EVENT_IS_NEW_TRACE facility.
// unsigned int TRACE_EVENT_API_GET_NUM_TRACES_RECORDED()
#define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED \
    SkEventTracer::GetInstance()->getNumTracesRecorded

// Add a trace event to the platform tracing system.
// SkEventTracer::Handle TRACE_EVENT_API_ADD_TRACE_EVENT(
//                    char phase,
//                    const uint8_t* category_group_enabled,
//                    const char* name,
//                    uint64_t id,
//                    int num_args,
//                    const char** arg_names,
//                    const uint8_t* arg_types,
//                    const uint64_t* arg_values,
//                    unsigned char flags)
#define TRACE_EVENT_API_ADD_TRACE_EVENT \
    SkEventTracer::GetInstance()->addTraceEvent

// Set the duration field of a COMPLETE trace event.
// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
//     const uint8_t* category_group_enabled,
//     const char* name,
//     SkEventTracer::Handle id)
#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
    SkEventTracer::GetInstance()->updateTraceEventDuration

#define TRACE_EVENT_API_ATOMIC_WORD intptr_t
#define TRACE_EVENT_API_ATOMIC_LOAD(var) sk_atomic_load(&var, sk_memory_order_relaxed)
#define TRACE_EVENT_API_ATOMIC_STORE(var, value) \
    sk_atomic_store(&var, value, sk_memory_order_relaxed)

// Defines visibility for classes in trace_event.h
#define TRACE_EVENT_API_CLASS_EXPORT SK_API

// The thread buckets for the sampling profiler.
TRACE_EVENT_API_CLASS_EXPORT extern \
    TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];

#define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket)                           \
    g_trace_state[thread_bucket]

////////////////////////////////////////////////////////////////////////////////

// Implementation detail: trace event macros create temporary variables
// to keep instrumentation overhead low. These macros give each temporary
// variable a unique name based on the line number to prevent name collisions.
#define INTERNAL_TRACE_EVENT_UID3(a,b) \
    trace_event_unique_##a##b
#define INTERNAL_TRACE_EVENT_UID2(a,b) \
    INTERNAL_TRACE_EVENT_UID3(a,b)
#define INTERNAL_TRACE_EVENT_UID(name_prefix) \
    INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)
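// For example, INTERNAL_TRACE_EVENT_UID(atomic) used on line 42 of a file
// expands to a variable named trace_event_unique_atomic42 (illustrative; the
// two-level expansion lets __LINE__ be substituted before token pasting).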

// Implementation detail: internal macro to create static category.
// No barriers are needed, because this code is designed to operate safely
// even when the unsigned char* points to garbage data (which may be the case
// on processors without cache coherency).
#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
    category_group, atomic, category_group_enabled) \
    category_group_enabled = \
        reinterpret_cast<const uint8_t*>(TRACE_EVENT_API_ATOMIC_LOAD( \
            atomic)); \
    if (!category_group_enabled) { \
      category_group_enabled = \
          TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \
      TRACE_EVENT_API_ATOMIC_STORE(atomic, \
          reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( \
              category_group_enabled)); \
    }

#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \
    static TRACE_EVENT_API_ATOMIC_WORD INTERNAL_TRACE_EVENT_UID(atomic) = 0; \
    const uint8_t* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(category_group, \
        INTERNAL_TRACE_EVENT_UID(atomic), \
        INTERNAL_TRACE_EVENT_UID(category_group_enabled));

// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
    do { \
      INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
      if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
        skia::tracing_internals::AddTraceEvent( \
            phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
            skia::tracing_internals::kNoEventId, flags, ##__VA_ARGS__); \
      } \
    } while (0)

// Implementation detail: internal macro to create static category and add begin
// event if the category is enabled. Also adds the end event when the scope
// ends.
#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
    skia::tracing_internals::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
      SkEventTracer::Handle h = skia::tracing_internals::AddTraceEvent( \
          TRACE_EVENT_PHASE_COMPLETE, \
          INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
          name, skia::tracing_internals::kNoEventId, \
          TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
      INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
          INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
    }

// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
                                         flags, ...) \
    do { \
      INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
      if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
        unsigned char trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
        skia::tracing_internals::TraceID trace_event_trace_id( \
            id, &trace_event_flags); \
        skia::tracing_internals::AddTraceEvent( \
            phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
            name, trace_event_trace_id.data(), trace_event_flags, \
            ##__VA_ARGS__); \
      } \
    } while (0)

// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(phase, \
        category_group, name, id, thread_id, timestamp, flags, ...) \
    do { \
      INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
      if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
        unsigned char trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
        skia::tracing_internals::TraceID trace_event_trace_id( \
            id, &trace_event_flags); \
        skia::tracing_internals::AddTraceEventWithThreadIdAndTimestamp( \
            phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
            name, trace_event_trace_id.data(), \
            thread_id, base::TimeTicks::FromInternalValue(timestamp), \
            trace_event_flags, ##__VA_ARGS__); \
      } \
    } while (0)
876 
877 // Notes regarding the following definitions:
878 // New values can be added and propagated to third party libraries, but existing
879 // definitions must never be changed, because third party libraries may use old
880 // definitions.
881 
882 // Phase indicates the nature of an event entry. E.g. part of a begin/end pair.
883 #define TRACE_EVENT_PHASE_BEGIN    ('B')
884 #define TRACE_EVENT_PHASE_END      ('E')
885 #define TRACE_EVENT_PHASE_COMPLETE ('X')
886 #define TRACE_EVENT_PHASE_INSTANT  ('i')
887 #define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S')
888 #define TRACE_EVENT_PHASE_ASYNC_STEP_INTO  ('T')
889 #define TRACE_EVENT_PHASE_ASYNC_STEP_PAST  ('p')
890 #define TRACE_EVENT_PHASE_ASYNC_END   ('F')
891 #define TRACE_EVENT_PHASE_FLOW_BEGIN ('s')
892 #define TRACE_EVENT_PHASE_FLOW_STEP  ('t')
893 #define TRACE_EVENT_PHASE_FLOW_END   ('f')
894 #define TRACE_EVENT_PHASE_METADATA ('M')
895 #define TRACE_EVENT_PHASE_COUNTER  ('C')
896 #define TRACE_EVENT_PHASE_SAMPLE  ('P')
897 #define TRACE_EVENT_PHASE_CREATE_OBJECT ('N')
898 #define TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ('O')
899 #define TRACE_EVENT_PHASE_DELETE_OBJECT ('D')
900 
901 // Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
902 #define TRACE_EVENT_FLAG_NONE         (static_cast<unsigned char>(0))
903 #define TRACE_EVENT_FLAG_COPY         (static_cast<unsigned char>(1 << 0))
904 #define TRACE_EVENT_FLAG_HAS_ID       (static_cast<unsigned char>(1 << 1))
905 #define TRACE_EVENT_FLAG_MANGLE_ID    (static_cast<unsigned char>(1 << 2))
906 #define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast<unsigned char>(1 << 3))
907 
908 #define TRACE_EVENT_FLAG_SCOPE_MASK   (static_cast<unsigned char>( \
909     TRACE_EVENT_FLAG_SCOPE_OFFSET | (TRACE_EVENT_FLAG_SCOPE_OFFSET << 1)))
910 
911 // Type values for identifying types in the TraceValue union.
912 #define TRACE_VALUE_TYPE_BOOL         (static_cast<unsigned char>(1))
913 #define TRACE_VALUE_TYPE_UINT         (static_cast<unsigned char>(2))
914 #define TRACE_VALUE_TYPE_INT          (static_cast<unsigned char>(3))
915 #define TRACE_VALUE_TYPE_DOUBLE       (static_cast<unsigned char>(4))
916 #define TRACE_VALUE_TYPE_POINTER      (static_cast<unsigned char>(5))
917 #define TRACE_VALUE_TYPE_STRING       (static_cast<unsigned char>(6))
918 #define TRACE_VALUE_TYPE_COPY_STRING  (static_cast<unsigned char>(7))
919 #define TRACE_VALUE_TYPE_CONVERTABLE  (static_cast<unsigned char>(8))
920 
921 // Enum reflecting the scope of an INSTANT event. Must fit within
922 // TRACE_EVENT_FLAG_SCOPE_MASK.
923 #define TRACE_EVENT_SCOPE_GLOBAL  (static_cast<unsigned char>(0 << 3))
924 #define TRACE_EVENT_SCOPE_PROCESS (static_cast<unsigned char>(1 << 3))
925 #define TRACE_EVENT_SCOPE_THREAD  (static_cast<unsigned char>(2 << 3))
926 
927 #define TRACE_EVENT_SCOPE_NAME_GLOBAL  ('g')
928 #define TRACE_EVENT_SCOPE_NAME_PROCESS ('p')
929 #define TRACE_EVENT_SCOPE_NAME_THREAD  ('t')
930 
931 namespace skia {
932 namespace tracing_internals {
933 
934 // Specify these values when the corresponding argument of AddTraceEvent is not
935 // used.
936 const int kZeroNumArgs = 0;
937 const uint64_t kNoEventId = 0;
938 
939 // TraceID encapsulates an ID that can either be an integer or pointer. Pointers
940 // are by default mangled with the Process ID so that they are unlikely to
941 // collide when the same pointer is used on different processes.
942 class TraceID {
943  public:
944   class DontMangle {
945    public:
DontMangle(const void * id)946     explicit DontMangle(const void* id)
947         : data_(static_cast<uint64_t>(
948               reinterpret_cast<uintptr_t>(id))) {}
DontMangle(uint64_t id)949     explicit DontMangle(uint64_t id) : data_(id) {}
DontMangle(unsigned int id)950     explicit DontMangle(unsigned int id) : data_(id) {}
DontMangle(unsigned short id)951     explicit DontMangle(unsigned short id) : data_(id) {}
DontMangle(unsigned char id)952     explicit DontMangle(unsigned char id) : data_(id) {}
DontMangle(long long id)953     explicit DontMangle(long long id)
954         : data_(static_cast<uint64_t>(id)) {}
DontMangle(long id)955     explicit DontMangle(long id)
956         : data_(static_cast<uint64_t>(id)) {}
DontMangle(int id)957     explicit DontMangle(int id)
958         : data_(static_cast<uint64_t>(id)) {}
DontMangle(short id)959     explicit DontMangle(short id)
960         : data_(static_cast<uint64_t>(id)) {}
DontMangle(signed char id)961     explicit DontMangle(signed char id)
962         : data_(static_cast<uint64_t>(id)) {}
data()963     uint64_t data() const { return data_; }
964    private:
965     uint64_t data_;
966   };
967 
968   class ForceMangle {
969    public:
ForceMangle(uint64_t id)970     explicit ForceMangle(uint64_t id) : data_(id) {}
ForceMangle(unsigned int id)971     explicit ForceMangle(unsigned int id) : data_(id) {}
ForceMangle(unsigned short id)972     explicit ForceMangle(unsigned short id) : data_(id) {}
ForceMangle(unsigned char id)973     explicit ForceMangle(unsigned char id) : data_(id) {}
ForceMangle(long long id)974     explicit ForceMangle(long long id)
975         : data_(static_cast<uint64_t>(id)) {}
ForceMangle(long id)976     explicit ForceMangle(long id)
977         : data_(static_cast<uint64_t>(id)) {}
ForceMangle(int id)978     explicit ForceMangle(int id)
979         : data_(static_cast<uint64_t>(id)) {}
ForceMangle(short id)980     explicit ForceMangle(short id)
981         : data_(static_cast<uint64_t>(id)) {}
ForceMangle(signed char id)982     explicit ForceMangle(signed char id)
983         : data_(static_cast<uint64_t>(id)) {}
data()984     uint64_t data() const { return data_; }
985    private:
986     uint64_t data_;
987   };
988 
TraceID(const void * id,unsigned char * flags)989   TraceID(const void* id, unsigned char* flags)
990       : data_(static_cast<uint64_t>(
991               reinterpret_cast<uintptr_t>(id))) {
992     *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
993   }
TraceID(ForceMangle id,unsigned char * flags)994   TraceID(ForceMangle id, unsigned char* flags) : data_(id.data()) {
995     *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
996   }
TraceID(DontMangle id,unsigned char * flags)997   TraceID(DontMangle id, unsigned char* flags) : data_(id.data()) {
998   }
TraceID(uint64_t id,unsigned char * flags)999   TraceID(uint64_t id, unsigned char* flags)
1000       : data_(id) { (void)flags; }
TraceID(unsigned int id,unsigned char * flags)1001   TraceID(unsigned int id, unsigned char* flags)
1002       : data_(id) { (void)flags; }
TraceID(unsigned short id,unsigned char * flags)1003   TraceID(unsigned short id, unsigned char* flags)
1004       : data_(id) { (void)flags; }
TraceID(unsigned char id,unsigned char * flags)1005   TraceID(unsigned char id, unsigned char* flags)
1006       : data_(id) { (void)flags; }
TraceID(long long id,unsigned char * flags)1007   TraceID(long long id, unsigned char* flags)
1008       : data_(static_cast<uint64_t>(id)) { (void)flags; }
TraceID(long id,unsigned char * flags)1009   TraceID(long id, unsigned char* flags)
1010       : data_(static_cast<uint64_t>(id)) { (void)flags; }
TraceID(int id,unsigned char * flags)1011   TraceID(int id, unsigned char* flags)
1012       : data_(static_cast<uint64_t>(id)) { (void)flags; }
TraceID(short id,unsigned char * flags)1013   TraceID(short id, unsigned char* flags)
1014       : data_(static_cast<uint64_t>(id)) { (void)flags; }
TraceID(signed char id,unsigned char * flags)1015   TraceID(signed char id, unsigned char* flags)
1016       : data_(static_cast<uint64_t>(id)) { (void)flags; }
1017 
data()1018   uint64_t data() const { return data_; }
1019 
1020  private:
1021   uint64_t data_;
1022 };

// Simple union to store various types as uint64_t.
union TraceValueUnion {
  bool as_bool;
  uint64_t as_uint;
  long long as_int;
  double as_double;
  const void* as_pointer;
  const char* as_string;
};

// Simple container for const char* that should be copied instead of retained.
class TraceStringWithCopy {
 public:
  explicit TraceStringWithCopy(const char* str) : str_(str) {}
  operator const char* () const { return str_; }
 private:
  const char* str_;
};

// Define SetTraceValue for each allowed type. It stores the type and
// value in the return arguments. This allows this API to avoid declaring any
// structures so that it is portable to third_party libraries.
#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, \
                                         union_member, \
                                         value_type_id) \
    static inline void SetTraceValue( \
        actual_type arg, \
        unsigned char* type, \
        uint64_t* value) { \
      TraceValueUnion type_value; \
      type_value.union_member = arg; \
      *type = value_type_id; \
      *value = type_value.as_uint; \
    }
// Simpler form for int types that can be safely cast.
#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, \
                                             value_type_id) \
    static inline void SetTraceValue( \
        actual_type arg, \
        unsigned char* type, \
        uint64_t* value) { \
      *type = value_type_id; \
      *value = static_cast<uint64_t>(arg); \
    }

INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint64_t, TRACE_VALUE_TYPE_UINT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned short, TRACE_VALUE_TYPE_UINT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long long, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(short, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE(bool, as_bool, TRACE_VALUE_TYPE_BOOL)
INTERNAL_DECLARE_SET_TRACE_VALUE(double, as_double, TRACE_VALUE_TYPE_DOUBLE)
INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, as_pointer,
                                 TRACE_VALUE_TYPE_POINTER)
INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, as_string,
                                 TRACE_VALUE_TYPE_STRING)
INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, as_string,
                                 TRACE_VALUE_TYPE_COPY_STRING)

#undef INTERNAL_DECLARE_SET_TRACE_VALUE
#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
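
// For illustration, the double overload generated by the macro above expands
// to roughly the following (not literal preprocessor output):
//   static inline void SetTraceValue(double arg,
//                                    unsigned char* type,
//                                    uint64_t* value) {
//     TraceValueUnion type_value;
//     type_value.as_double = arg;
//     *type = TRACE_VALUE_TYPE_DOUBLE;
//     *value = type_value.as_uint;  // bit pattern of the double as uint64_t
//   }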

// These AddTraceEvent template functions are defined here instead of in the
// macro, because the arg_values could be temporary objects, such as
// std::string. In order to store pointers to the internal c_str and pass
// through to the tracing API, the arg_values must live throughout these
// procedures.

static inline SkEventTracer::Handle
AddTraceEvent(
    char phase,
    const uint8_t* category_group_enabled,
    const char* name,
    uint64_t id,
    unsigned char flags) {
  return TRACE_EVENT_API_ADD_TRACE_EVENT(
      phase, category_group_enabled, name, id,
      kZeroNumArgs, NULL, NULL, NULL, flags);
}

template<class ARG1_TYPE>
static inline SkEventTracer::Handle
AddTraceEvent(
    char phase,
    const uint8_t* category_group_enabled,
    const char* name,
    uint64_t id,
    unsigned char flags,
    const char* arg1_name,
    const ARG1_TYPE& arg1_val) {
  const int num_args = 1;
  uint8_t arg_types[1];
  uint64_t arg_values[1];
  SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
  return TRACE_EVENT_API_ADD_TRACE_EVENT(
      phase, category_group_enabled, name, id,
      num_args, &arg1_name, arg_types, arg_values, flags);
}

template<class ARG1_TYPE, class ARG2_TYPE>
static inline SkEventTracer::Handle
AddTraceEvent(
    char phase,
    const uint8_t* category_group_enabled,
    const char* name,
    uint64_t id,
    unsigned char flags,
    const char* arg1_name,
    const ARG1_TYPE& arg1_val,
    const char* arg2_name,
    const ARG2_TYPE& arg2_val) {
  const int num_args = 2;
  const char* arg_names[2] = { arg1_name, arg2_name };
  unsigned char arg_types[2];
  uint64_t arg_values[2];
  SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
  SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
  return TRACE_EVENT_API_ADD_TRACE_EVENT(
      phase, category_group_enabled, name, id,
      num_args, arg_names, arg_types, arg_values, flags);
}
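
// Illustrative sketch only: the TRACE_EVENTx macros ultimately call these
// overloads with arguments along the lines of the following. The phase and
// flag constants are defined elsewhere in this header; `category_group_enabled`
// and `howMuch` are hypothetical locals:
//   SkEventTracer::Handle handle =
//       skia::tracing_internals::AddTraceEvent(
//           TRACE_EVENT_PHASE_BEGIN, category_group_enabled,
//           "doSomethingCostly", 0 /* id */, TRACE_EVENT_FLAG_NONE,
//           "howMuch", howMuch);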

// Used by TRACE_EVENTx macros. Do not use directly.
class TRACE_EVENT_API_CLASS_EXPORT ScopedTracer {
 public:
  // Note: members of data_ intentionally left uninitialized. See Initialize.
  ScopedTracer() : p_data_(NULL) {}

  ~ScopedTracer() {
    if (p_data_ && *data_.category_group_enabled)
      TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
          data_.category_group_enabled, data_.name, data_.event_handle);
  }

  void Initialize(const uint8_t* category_group_enabled,
                  const char* name,
                  SkEventTracer::Handle event_handle) {
    data_.category_group_enabled = category_group_enabled;
    data_.name = name;
    data_.event_handle = event_handle;
    p_data_ = &data_;
  }

 private:
  // This Data struct workaround is to avoid initializing all the members
  // in Data during construction of this object, since this object is always
  // constructed, even when tracing is disabled. If the members of Data were
  // members of this class instead, compiler warnings would occur about
  // potential uninitialized accesses.
  struct Data {
    const uint8_t* category_group_enabled;
    const char* name;
    SkEventTracer::Handle event_handle;
  };
  Data* p_data_;
  Data data_;
};
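
// Sketch of the intended use by the TRACE_EVENTx macros (illustrative only;
// the real expansion lives in the INTERNAL_TRACE_EVENT_* macros elsewhere in
// this header). A ScopedTracer is constructed unconditionally, but
// Initialize() is only called when the category is enabled, so the destructor
// emits a duration update only for events that were actually added:
//   skia::tracing_internals::ScopedTracer tracer;
//   if (*category_group_enabled) {
//     SkEventTracer::Handle h =
//         AddTraceEvent(TRACE_EVENT_PHASE_COMPLETE, category_group_enabled,
//                       "SomethingCostly", 0, TRACE_EVENT_FLAG_NONE);
//     tracer.Initialize(category_group_enabled, "SomethingCostly", h);
//   }
//   // ~ScopedTracer() updates the event's duration when the scope exits.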

// Used by TRACE_EVENT_BINARY_EFFICIENTx macro. Do not use directly.
class TRACE_EVENT_API_CLASS_EXPORT ScopedTraceBinaryEfficient {
 public:
  ScopedTraceBinaryEfficient(const char* category_group, const char* name);
  ~ScopedTraceBinaryEfficient();

 private:
  const uint8_t* category_group_enabled_;
  const char* name_;
  SkEventTracer::Handle event_handle_;
};

// This macro generates less code than TRACE_EVENT0 but is also
// slower to execute when tracing is off. It should generally only be
// used with code that is seldom executed or conditionally executed
// when debugging.
// For now the category_group must be "gpu".
#define TRACE_EVENT_BINARY_EFFICIENT0(category_group, name) \
    skia::tracing_internals::ScopedTraceBinaryEfficient \
        INTERNAL_TRACE_EVENT_UID(scoped_trace)(category_group, name);
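
// Example usage (illustrative; DrawBatch is a hypothetical function, and the
// category group is restricted to "gpu" as noted above):
//   void DrawBatch() {
//     TRACE_EVENT_BINARY_EFFICIENT0("gpu", "DrawBatch");
//     ...
//   }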

// TraceEventSamplingStateScope records the current sampling state
// and sets a new sampling state. When the scope exits, it restores
// the previously recorded sampling state.
template<size_t BucketNumber>
class TraceEventSamplingStateScope {
 public:
  TraceEventSamplingStateScope(const char* category_and_name) {
    previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current();
    TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name);
  }

  ~TraceEventSamplingStateScope() {
    TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_);
  }

  static inline const char* Current() {
    return reinterpret_cast<const char*>(TRACE_EVENT_API_ATOMIC_LOAD(
      g_trace_state[BucketNumber]));
  }

  static inline void Set(const char* category_and_name) {
    TRACE_EVENT_API_ATOMIC_STORE(
      g_trace_state[BucketNumber],
      reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
        const_cast<char*>(category_and_name)));
  }

 private:
  const char* previous_state_;
};
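
// Illustrative sketch only: the sampling state is normally managed through
// the sampling-state macros elsewhere in this header rather than by
// instantiating this class directly; bucket 0 and the state string below are
// assumptions for the example:
//   {
//     skia::tracing_internals::TraceEventSamplingStateScope<0>
//         scope("MY_SUBSYSTEM");
//     ...  // a sampler reading g_trace_state[0] observes this state
//   }  // the previous state for bucket 0 is restored here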

}  // namespace tracing_internals
}  // namespace skia

#endif