/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>  // write(), getpid() are used below; don't rely on transitive includes.

#include "bionic/pthread_internal.h"
#include "private/CachedProperty.h"
#include "private/bionic_lock.h"
#include "private/bionic_systrace.h"
#include "private/bionic_tls.h"

#include <async_safe/log.h>
#include <cutils/trace.h>  // For ATRACE_TAG_BIONIC.
31
// Extra bytes reserved beyond the message length for the "B|<pid>|" prefix
// written into the trace buffer by trace_begin_internal().
#define WRITE_OFFSET 32

// Protects lazy (re)initialization of g_tags and g_trace_marker_fd below.
static Lock g_lock;
// Cached view of the system property holding the enabled atrace tag bits.
static CachedProperty g_debug_atrace_tags_enableflags("debug.atrace.tags.enableflags");
// Bitmask of enabled ATRACE_TAG_* values, parsed from the property above.
static uint64_t g_tags;
// Lazily opened fd for the kernel trace_marker file; -1 until first opened.
static int g_trace_marker_fd = -1;
38
should_trace()39 static bool should_trace() {
40 g_lock.lock();
41 if (g_debug_atrace_tags_enableflags.DidChange()) {
42 g_tags = strtoull(g_debug_atrace_tags_enableflags.Get(), nullptr, 0);
43 }
44 g_lock.unlock();
45 return ((g_tags & ATRACE_TAG_BIONIC) != 0);
46 }
47
get_trace_marker_fd()48 static int get_trace_marker_fd() {
49 g_lock.lock();
50 if (g_trace_marker_fd == -1) {
51 g_trace_marker_fd = open("/sys/kernel/tracing/trace_marker", O_CLOEXEC | O_WRONLY);
52 if (g_trace_marker_fd == -1) {
53 g_trace_marker_fd = open("/sys/kernel/debug/tracing/trace_marker", O_CLOEXEC | O_WRONLY);
54 }
55 }
56 g_lock.unlock();
57 return g_trace_marker_fd;
58 }
59
trace_begin_internal(const char * message)60 static void trace_begin_internal(const char* message) {
61 if (!should_trace()) {
62 return;
63 }
64
65 int trace_marker_fd = get_trace_marker_fd();
66 if (trace_marker_fd == -1) {
67 return;
68 }
69
70 // If bionic tracing has been enabled, then write the message to the
71 // kernel trace_marker.
72 int length = strlen(message);
73 char buf[length + WRITE_OFFSET];
74 size_t len = async_safe_format_buffer(buf, length + WRITE_OFFSET, "B|%d|%s", getpid(), message);
75
76 // Tracing may stop just after checking property and before writing the message.
77 // So the write is acceptable to fail. See b/20666100.
78 TEMP_FAILURE_RETRY(write(trace_marker_fd, buf, len));
79 }
80
bionic_trace_begin(const char * message)81 void bionic_trace_begin(const char* message) {
82 // Some functions called by trace_begin_internal() can call
83 // bionic_trace_begin(). Prevent infinite recursion and non-recursive mutex
84 // deadlock by using a flag in the thread local storage.
85 bionic_tls& tls = __get_bionic_tls();
86 if (tls.bionic_systrace_disabled) {
87 return;
88 }
89 tls.bionic_systrace_disabled = true;
90
91 trace_begin_internal(message);
92
93 tls.bionic_systrace_disabled = false;
94 }
95
trace_end_internal()96 static void trace_end_internal() {
97 if (!should_trace()) {
98 return;
99 }
100
101 int trace_marker_fd = get_trace_marker_fd();
102 if (trace_marker_fd == -1) {
103 return;
104 }
105
106 // This code is intentionally "sub-optimal"; do not optimize this by inlining
107 // the E| string into the write.
108 //
109 // This is because if the const char* string passed to write(trace_marker) is not
110 // in resident memory (e.g. the page of the .rodata section that contains it has
111 // been paged out, or the anonymous page that contained a heap-based string is
112 // swapped in zram), the ftrace code will NOT page it in and instead report
113 // <faulted>.
114 //
115 // We "fix" this by putting the string on the stack, which is more unlikely
116 // to be paged out and pass the pointer to that instead.
117 //
118 // See b/197620214 for more context on this.
119 volatile char buf[2]{'E', '|'};
120 TEMP_FAILURE_RETRY(write(trace_marker_fd, const_cast<const char*>(buf), 2));
121 }
122
bionic_trace_end()123 void bionic_trace_end() {
124 // Some functions called by trace_end_internal() can call
125 // bionic_trace_begin(). Prevent infinite recursion and non-recursive mutex
126 // deadlock by using a flag in the thread local storage.
127 bionic_tls& tls = __get_bionic_tls();
128 if (tls.bionic_systrace_disabled) {
129 return;
130 }
131 tls.bionic_systrace_disabled = true;
132
133 trace_end_internal();
134
135 tls.bionic_systrace_disabled = false;
136 }
137
ScopedTrace(const char * message)138 ScopedTrace::ScopedTrace(const char* message) : called_end_(false) {
139 bionic_trace_begin(message);
140 }
141
~ScopedTrace()142 ScopedTrace::~ScopedTrace() {
143 End();
144 }
145
End()146 void ScopedTrace::End() {
147 if (!called_end_) {
148 bionic_trace_end();
149 called_end_ = true;
150 }
151 }
152