/*
 * Copyright (c) 2020 Google Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <arch/arch_ops.h>
#include <assert.h>
#include <inttypes.h>
#include <lib/backtrace/backtrace.h>
#include <lib/backtrace/symbolize.h>
#include <lib/trusty/trusty_app.h>
#include <trusty/uuid.h>
/*
 * Traces on release builds look like this:
 * Backtrace for thread: trusty_app_12_7ee4dddc-177a-420
 * (app: crasher)
 * kSP+0x0nn0: 0xffff0..0nnnnn
 * kSP+0x0nn0: 0xffff0..0nnnnn
 * kSP+0x0nn0: 0xffff0..0nnnnn
 * kSP+0x0nn0: 0x00000..0nnnnn crasher_data_func+0x0/0xnn
 * uSP+0x0nn0: 0x00000..0nnnnn chan_event_handler_proc...
 * uSP+0x0nn0: 0x00000..0nnnnn tipc_handle_event+0xnn/0xnnn
 * uSP+0x0nn0: 0x00000..0nnnnn main+0xnn/0xnn
 * uSP+0x0nn0: 0x00000..0nnnnn libc_start_main_stage2+0xnn/0xnn
 * uSP+0x0nn0: 0x00000..000000
 *
 * Debug builds show more information:
 * Backtrace for thread: trusty_app_30_7ee4dddc-177a-420
 * (app: crasher)
 * 0xffffn..n0: 0xffffn..n/0xffff0..0nnnnn
 * 0xffffn..n0: 0xffffn..n/0xffff0..0nnnnn
 * 0xffffn..n0: 0xffffn..n/0xffff0..0nnnnn
 * 0xffffn..n0: 0x0000n..n/0x00000..0nnnnn crasher_data_func+0x0/0xnn
 * 0x0000n..n0: 0x0000n..n/0x00000..0nnnnn chan_event_handler_proc...
 * 0x0000n..n0: 0x0000n..n/0x00000..0nnnnn tipc_handle_event+0xnn/0xnnn
 * 0x0000n..n0: 0x0000n..n/0x00000..0nnnnn main+0xnn/0xnn
 * 0x0000n..n0: 0x0000n..n/0x00000..0nnnnn libc_start_main_stage2+0xnn/0xnn
 * 0x0000n..n0: 0x00000..0/0x00000..000000
 *
 * Kernel panics in release builds:
 * Backtrace for thread: app manager
 * kSP+0x0nn0: 0xffff0..0nnnnn
 * kSP+0x0nn0: 0xffff0..0nnnnn
 * kSP+0x0nn0: 0xffff0..0nnnnn
 *
 * Kernel panics in debug builds:
 * Backtrace for thread: app manager
 * 0xffffn..n0: 0xffffn..n/0xffff0..0nnnnn
 * 0xffffn..n0: 0xffffn..n/0xffff0..0nnnnn
 * 0xffffn..n0: 0xffffn..n/0xffff0..0nnnnn
 * 0xffffn..n0: 0xffffn..n/0xffff0..0nnnnn
 */

#if IS_64BIT
#define PRI0xPTR "016" PRIxPTR
#else
#define PRI0xPTR "08" PRIxPTR
#endif

/* Format for canonical stack offsets */
#define PRI0xSTKOFF "04" PRIxPTR

extern char _start;

static bool is_on_user_stack(struct thread* _thread, uintptr_t addr);
static bool is_on_kernel_stack(struct thread* thread, uintptr_t addr);

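/**
 * print_stack_address() - print a stack address as an offset into its stack
 * @thread: thread that owns the stack being examined
 * @addr: stack address to print
 *
 * Release builds only print the offset of @addr from the base of the user
 * (uSP) or kernel (kSP) stack; TEST_BUILD builds also print the absolute
 * address.
 */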
static void print_stack_address(struct thread* thread, uintptr_t addr) {
#if TEST_BUILD
    /*
     * For security, never print absolute addresses in release builds.
     */
    printf("0x%" PRI0xPTR " ", addr);
#endif

    if (is_on_user_stack(thread, addr)) {
        struct trusty_thread* trusty_thread = trusty_thread_get(thread);
        uintptr_t stack_low_addr =
                trusty_thread->stack_start - trusty_thread->stack_size;
        printf("uSP+0x%" PRI0xSTKOFF, addr - stack_low_addr);
        return;
    }

    if (is_on_kernel_stack(thread, addr)) {
        printf("kSP+0x%" PRI0xSTKOFF, addr - (uintptr_t)thread->stack);
        return;
    }

    /*
     * We should never get here for frame->frame_addr,
     * but we print something just in case
     */
    if (addr) {
        printf("<non-null>");
    } else {
        printf(" <null>");
    }
}

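/**
 * print_function_info() - print one backtrace line for a stack frame
 * @thread: thread being backtraced
 * @frame: frame whose frame address and return address are printed
 * @load_bias: load bias subtracted from the return address
 * @info: symbol info for the return address, or NULL if unavailable
 */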
static void print_function_info(struct thread* thread,
                                struct stack_frame* frame,
                                uintptr_t load_bias,
                                struct pc_symbol_info* info) {
    uintptr_t pc_offset;
    uintptr_t pc = arch_extract_return_addr(frame->ret_addr);
    __builtin_sub_overflow(pc, load_bias, &pc_offset);

    print_stack_address(thread, frame->frame_addr);
    printf(": ");

#if TEST_BUILD
    /*
     * For security reasons, never print absolute addresses in
     * release builds
     */
    printf("0x%" PRI0xPTR "/", pc);
#endif
    printf("0x%" PRI0xPTR, pc_offset);

    if (info) {
        printf(" %s+0x%" PRIxPTR "/0x%" PRIxPTR "\n", info->symbol,
               info->offset, info->size);
    } else {
        printf("\n");
    }
}

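/**
 * dump_user_function() - symbolize and dump a function on a user stack
 * @thread: thread being backtraced
 * @app: app used to symbolize the return address, may be NULL
 * @frame: frame being dumped
 */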
static void dump_user_function(struct thread* thread,
                               struct trusty_app* app,
                               struct stack_frame* frame) {
    uintptr_t ret_addr = arch_extract_return_addr(frame->ret_addr);
    uintptr_t load_bias = app ? app->load_bias : 0;
    struct pc_symbol_info info;
    int rc = trusty_app_symbolize(app, ret_addr, &info);
    if (rc == NO_ERROR) {
        print_function_info(thread, frame, load_bias, &info);
    } else {
        print_function_info(thread, frame, load_bias, NULL);
    }
}

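/**
 * dump_kernel_function() - dump a function on the kernel stack
 * @thread: thread being backtraced
 * @frame: frame being dumped
 */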
static void dump_kernel_function(struct thread* thread,
                                 struct stack_frame* frame) {
    uintptr_t load_bias;
    __builtin_sub_overflow((uintptr_t)&_start, KERNEL_BASE + KERNEL_LOAD_OFFSET,
                           &load_bias);

    /* TODO(b/164524596): kernel instruction address symbolization */
    print_function_info(thread, frame, load_bias, NULL);
}

/**
 * dump_function() - dump symbol info about function containing pc
 * @thread: thread containing the instruction
 * @frame: instruction address of the function being dumped and next frame ptr
 */
static void dump_function(thread_t* thread, struct stack_frame* frame) {
    uintptr_t ret_addr = arch_extract_return_addr(frame->ret_addr);

    if (is_user_address(ret_addr)) {
        struct trusty_thread* trusty_thread = trusty_thread_get(thread);
        dump_user_function(thread, trusty_thread ? trusty_thread->app : NULL,
                           frame);
    } else if (is_kernel_address(ret_addr)) {
        dump_kernel_function(thread, frame);
    } else {
        print_function_info(thread, frame, 0, NULL);
    }
}

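/**
 * is_on_user_stack() - check if address is on the thread's user stack
 * @_thread: thread that owns the stack
 * @addr: address being checked
 *
 * Return: true if @addr is on the user stack, false otherwise
 */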
static bool is_on_user_stack(struct thread* _thread, uintptr_t addr) {
    uintptr_t stack_end;
    uintptr_t stack_bottom;
    struct trusty_thread* thread = trusty_thread_get(_thread);

    if (!thread) {
        return false;
    }

    stack_end = thread->stack_start;
    if (__builtin_sub_overflow(stack_end, thread->stack_size, &stack_bottom)) {
        return false;
    }

    return stack_bottom <= addr && addr < stack_end;
}

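/**
 * is_on_kernel_stack() - check if address is on the thread's kernel stack
 * @thread: thread that owns the stack
 * @addr: address being checked
 *
 * Return: true if @addr is on the kernel stack, false otherwise
 */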
static bool is_on_kernel_stack(struct thread* thread, uintptr_t addr) {
    uintptr_t stack_bottom;
    uintptr_t stack_end;

    stack_bottom = (uintptr_t)thread->stack;
    if (__builtin_add_overflow(stack_bottom, thread->stack_size, &stack_end)) {
        return false;
    }

    return stack_bottom <= addr && addr < stack_end;
}

/**
 * is_on_stack() - check if address is on the stack
 * @thread: thread that owns the stack
 * @addr: address being checked
 * @user: true if we need to check against user stack, false if kernel stack
 *
 * Return: true if @addr is on the stack, false otherwise
 */
static bool is_on_stack(struct thread* thread, uintptr_t addr, bool user) {
    if (user) {
        return is_on_user_stack(thread, addr);
    } else {
        return is_on_kernel_stack(thread, addr);
    }
}

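/**
 * is_trace_monotonic() - check that a frame pointer advances the trace
 * @prev_fp: frame pointer of the previous frame
 * @next_fp: frame pointer of the next frame
 *
 * Return: true if @next_fp moves strictly in the unwind direction given by
 * stack_direction, false otherwise
 */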
static inline bool is_trace_monotonic(uintptr_t prev_fp, uintptr_t next_fp) {
    return stack_direction ? next_fp < prev_fp : next_fp > prev_fp;
}

/**
 * dump_monotonic_backtrace() - dump backtrace while only moving up the stack
 * @thread: thread being backtraced
 * @frame: starting frame, used to iterate through frames in-place
 * @user: true if we're traversing a user stack, false if kernel stack
 *
 * Return: state of @frame
 */
static int dump_monotonic_backtrace(struct thread* thread,
                                    struct stack_frame* frame,
                                    bool user) {
    int frame_state = FRAME_OK;
    while (frame_state == FRAME_OK) {
        frame_state = step_frame(frame, user);
        dump_function(thread, frame);

        if (is_on_stack(thread, frame->fp, !user)) {
            /* Transition to a different stack */
            return FRAME_OK;
        }
        if (is_zero_frame(frame)) {
            return FRAME_ZERO;
        }
        /* Validate that FP actually points to the stack */
        if (!is_on_stack(thread, frame->fp, user)) {
            return FRAME_CORRUPT;
        }
        /* Stack should only move in one direction */
        if (frame->frame_addr &&
            !is_trace_monotonic(frame->frame_addr, frame->fp)) {
            return FRAME_NON_MONOTONIC;
        }
    }
    return frame_state;
}

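/**
 * dump_backtrace_etc() - dump the kernel stack, then the user stack if the
 * trace transitions to it
 * @thread: thread being backtraced
 * @frame: starting frame, expected to be in kernel address space
 */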
static void dump_backtrace_etc(struct thread* thread,
                               struct stack_frame* frame) {
    /*
     * dump_backtrace_*() functions can only be called from kernel space.
     * Expect the first frame to be in kernel address space
     */
    if (!is_kernel_address(frame->fp)) {
        printf("Corrupt stack frame pointer! fp: 0x%" PRIxPTR "\n", frame->fp);
        return;
    }
    int frame_state = dump_monotonic_backtrace(thread, frame, false);
    if (frame_state == FRAME_NON_MONOTONIC) {
        printf("Stack frame moved in wrong direction! Stack overflow likely\n");
        /*
         * Try dumping the stack before the stack overflow. This will be corrupt
         * when it reaches the part of the stack that has been reused by the
         * current exception, but it might have useful information before it
         * gets to that point.
         */
        frame_state = dump_monotonic_backtrace(thread, frame, false);
    }

    if (frame_state == FRAME_OK && is_user_address(frame->fp)) {
        frame_state = dump_monotonic_backtrace(thread, frame, true);
    }

    switch (frame_state) {
    case FRAME_ZERO:
        /* Backtrace is expected to terminate with a zero frame */
        break;
    case FRAME_NON_MONOTONIC:
        printf("Stack frame moved in wrong direction! ");
        dump_function(thread, frame);
        break;
    default:
        printf("Corrupt stack frame! ");
        dump_function(thread, frame);
    }
}

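/**
 * dump_thread_backtrace() - print a backtrace for a thread
 * @thread: thread to backtrace; currently must be the current thread
 *
 * Typical usage from a fault or panic handler (illustrative):
 *     dump_thread_backtrace(get_current_thread());
 */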
void dump_thread_backtrace(struct thread* thread) {
    if (!thread) {
        printf("Not executing in any thread, backtrace not available!\n");
        return;
    }

    /*
     * TODO(b/149918767): Support backtracing for non-current threads. We need
     * operations on trusty_thread and trusty_app to be thread-safe first.
     */
    assert(thread == get_current_thread());

    struct stack_frame frame = {0};
    get_current_frame(&frame);

    printf("\nBacktrace for thread: %s\n", thread->name);
    struct trusty_app* app = current_trusty_app();
    if (app) {
        char uuid_str[UUID_STR_SIZE];
        uuid_to_str(&app->props.uuid, uuid_str);
        printf("(app: %s uuid: %s)\n", app->props.app_name, uuid_str);
    }

    dump_backtrace_etc(thread, &frame);
}