/*
 * This file is part of ltrace.
 * Copyright (C) 2007,2011,2012,2013,2014 Petr Machata, Red Hat Inc.
 * Copyright (C) 2010 Joe Damato
 * Copyright (C) 1998,2002,2003,2004,2008,2009 Juan Cespedes
 * Copyright (C) 2006 Ian Wienand
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include "config.h"

#include <asm/unistd.h>
#include <assert.h>
#include <errno.h>
#include <gelf.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#ifdef HAVE_LIBSELINUX
# include <selinux/selinux.h>
#endif

#include "linux-gnu/trace-defs.h"
#include "linux-gnu/trace.h"
#include "backend.h"
#include "breakpoint.h"
#include "debug.h"
#include "events.h"
#include "fetch.h"
#include "ltrace-elf.h"
#include "options.h"
#include "proc.h"
#include "prototype.h"
#include "ptrace.h"
#include "type.h"
#include "value.h"

void
trace_fail_warning(pid_t pid)
{
	/* This was adapted from GDB.  */
#ifdef HAVE_LIBSELINUX
	static int checked = 0;
	if (checked)
		return;
	checked = 1;

	/* -1 is returned for errors, 0 if it has no effect, 1 if
	 * PTRACE_ATTACH is forbidden.  */
	if (security_get_boolean_active("deny_ptrace") == 1)
		fprintf(stderr,
"The SELinux boolean 'deny_ptrace' is enabled, which may prevent ltrace from\n"
"tracing other processes.  You can disable this process attach protection by\n"
"issuing 'setsebool deny_ptrace=0' in the superuser context.\n");
#endif /* HAVE_LIBSELINUX */
}

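/* Ask the kernel to trace the calling process.  Meant to be called
 * in the child after fork and before exec, roughly like this (a
 * sketch of a hypothetical caller; COMMAND and ARGV stand in for the
 * real arguments):
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		trace_me();
 *		execvp(command, argv);
 *	}
 */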
void
trace_me(void)
{
	debug(DEBUG_PROCESS, "trace_me: pid=%d", getpid());
	if (ptrace(PTRACE_TRACEME, 0, 0, 0) < 0) {
		perror("PTRACE_TRACEME");
		trace_fail_warning(getpid());
		exit(1);
	}
}

/* There's a (hopefully) brief period of time after the child process
 * forks when we can't trace it yet.  Here we wait for the kernel to
 * prepare the process.  */
int
wait_for_proc(pid_t pid)
{
	/* man ptrace: PTRACE_ATTACH attaches to the process specified
	   in pid.  The child is sent a SIGSTOP, but will not
	   necessarily have stopped by the completion of this call;
	   use wait() to wait for the child to stop. */
	if (waitpid(pid, NULL, __WALL) != pid) {
		perror("trace_pid: waitpid");
		return -1;
	}

	return 0;
}

int
trace_pid(pid_t pid)
{
	debug(DEBUG_PROCESS, "trace_pid: pid=%d", pid);
	/* This shouldn't emit error messages, as there are legitimate
	 * reasons why the PID can't be attached: e.g. it may have
	 * already exited.  */
	if (ptrace(PTRACE_ATTACH, pid, 0, 0) < 0)
		return -1;

	return wait_for_proc(pid);
}

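/* Set the ptrace options that we need on PID: distinguishable
 * syscall stops (TRACESYSGOOD) and notification of fork, vfork,
 * clone and exec.  The high bit of proc->tracesysgood remembers that
 * this has already been done.  */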
void
trace_set_options(struct process *proc)
{
	if (proc->tracesysgood & 0x80)
		return;

	pid_t pid = proc->pid;
	debug(DEBUG_PROCESS, "trace_set_options: pid=%d", pid);

	long options = PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK |
		PTRACE_O_TRACEVFORK | PTRACE_O_TRACECLONE |
		PTRACE_O_TRACEEXEC;
	if (ptrace(PTRACE_SETOPTIONS, pid, 0, (void *)options) < 0 &&
	    ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *)options) < 0) {
		perror("PTRACE_SETOPTIONS");
		return;
	}
	proc->tracesysgood |= 0x80;
}

void
untrace_pid(pid_t pid) {
	debug(DEBUG_PROCESS, "untrace_pid: pid=%d", pid);
	ptrace(PTRACE_DETACH, pid, 0, 0);
}

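/* Resume PID, delivering SIGNUM to it, and keep stopping it at
 * syscall entries and exits.  */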
void
continue_after_signal(pid_t pid, int signum)
{
	debug(DEBUG_PROCESS, "continue_after_signal: pid=%d, signum=%d",
	      pid, signum);
	ptrace(PTRACE_SYSCALL, pid, 0, (void *)(uintptr_t)signum);
}

static enum ecb_status
event_for_pid(Event *event, void *data)
{
	if (event->proc != NULL && event->proc->pid == (pid_t)(uintptr_t)data)
		return ECB_YIELD;
	return ECB_CONT;
}

static int
have_events_for(pid_t pid)
{
	return each_qd_event(event_for_pid, (void *)(uintptr_t)pid) != NULL;
}

void
continue_process(pid_t pid)
{
	debug(DEBUG_PROCESS, "continue_process: pid=%d", pid);

	/* Only really continue the process if there are no events in
	   the queue for this process.  Otherwise just wait for the
	   other events to arrive.  */
	if (!have_events_for(pid))
		/* We always trace syscalls to control fork(),
		 * clone(), execve()... */
		ptrace(PTRACE_SYSCALL, pid, 0, 0);
	else
		debug(DEBUG_PROCESS,
		      "putting off the continue, events in queue.");
}

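/* Look up the task record for PID in PIDS.  Returns NULL if there is
 * none.  */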
static struct pid_task *
get_task_info(struct pid_set *pids, pid_t pid)
{
	assert(pid != 0);
	size_t i;
	for (i = 0; i < pids->count; ++i)
		if (pids->tasks[i].pid == pid)
			return &pids->tasks[i];

	return NULL;
}

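/* Append a zero-initialized record for PID to PIDS, growing the
 * array as necessary.  Returns NULL on allocation failure.  */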
static struct pid_task *
add_task_info(struct pid_set *pids, pid_t pid)
{
	if (pids->count == pids->alloc) {
		size_t ns = (2 * pids->alloc) ?: 4;
		struct pid_task *n = realloc(pids->tasks,
					     sizeof(*pids->tasks) * ns);
		if (n == NULL)
			return NULL;
		pids->tasks = n;
		pids->alloc = ns;
	}
	struct pid_task *task_info = &pids->tasks[pids->count++];
	memset(task_info, 0, sizeof(*task_info));
	task_info->pid = pid;
	return task_info;
}

static enum callback_status
task_stopped(struct process *task, void *data)
{
	enum process_status st = process_status(task->pid);
	if (data != NULL)
		*(enum process_status *)data = st;

	/* If the task is already stopped, don't worry about it.
	 * Likewise if it managed to become a zombie or terminate in
	 * the meantime.  This can happen when the whole thread group
	 * is terminating.  */
	switch (st) {
	case PS_INVALID:
	case PS_TRACING_STOP:
	case PS_ZOMBIE:
		return CBS_CONT;
	case PS_SLEEPING:
	case PS_STOP:
	case PS_OTHER:
		return CBS_STOP;
	}

	abort();
}

/* Task is blocked if it's stopped, or if it's a vfork parent.  */
static enum callback_status
task_blocked(struct process *task, void *data)
{
	struct pid_set *pids = data;
	struct pid_task *task_info = get_task_info(pids, task->pid);
	if (task_info != NULL
	    && task_info->vforked)
		return CBS_CONT;

	return task_stopped(task, NULL);
}

static Event *process_vfork_on_event(struct event_handler *super, Event *event);

static enum callback_status
task_vforked(struct process *task, void *data)
{
	if (task->event_handler != NULL
	    && task->event_handler->on_event == &process_vfork_on_event)
		return CBS_STOP;
	return CBS_CONT;
}

static int
is_vfork_parent(struct process *task)
{
	return each_task(task->leader, NULL, &task_vforked, NULL) != NULL;
}

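/* A callback for each_task.  Make sure TASK ends up stopped: record
 * it in the handler's pid_set and send it a SIGSTOP, unless it is
 * already stopped, freshly created, or a blocked vfork parent.  */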
static enum callback_status
send_sigstop(struct process *task, void *data)
{
	struct process *leader = task->leader;
	struct pid_set *pids = data;

	/* Look for pre-existing task record, or add new.  */
	struct pid_task *task_info = get_task_info(pids, task->pid);
	if (task_info == NULL)
		task_info = add_task_info(pids, task->pid);
	if (task_info == NULL) {
		perror("send_sigstop: add_task_info");
		destroy_event_handler(leader);
		/* Signal failure upwards.  */
		return CBS_STOP;
	}

	/* This task still has not been attached to.  It should be
	   stopped by the kernel.  */
	if (task->state == STATE_BEING_CREATED)
		return CBS_CONT;

	/* Don't bother sending SIGSTOP if we are already stopped, or
	 * if we sent the SIGSTOP already, which happens when we are
	 * handling "onexit" and inherited the handler from breakpoint
	 * re-enablement.  */
	enum process_status st;
	if (task_stopped(task, &st) == CBS_CONT)
		return CBS_CONT;
	if (task_info->sigstopped) {
		if (!task_info->delivered)
			return CBS_CONT;
		task_info->delivered = 0;
	}

	/* Also don't attempt to stop the process if it's the parent
	 * of a vforked process.  We set up the event handler
	 * specially to hint us.  In that case the parent is in D
	 * state, which we use to weed out unnecessary looping.  */
	if (st == PS_SLEEPING
	    && is_vfork_parent(task)) {
		task_info->vforked = 1;
		return CBS_CONT;
	}

	if (task_kill(task->pid, SIGSTOP) >= 0) {
		debug(DEBUG_PROCESS, "send SIGSTOP to %d", task->pid);
		task_info->sigstopped = 1;
	} else
		fprintf(stderr,
			"Warning: couldn't send SIGSTOP to %d\n", task->pid);

	return CBS_CONT;
}

/* On certain kernels, detaching right after a singlestep causes the
   tracee to be killed with a SIGTRAP (this even though the singlestep
   was properly caught by waitpid).  The ugly workaround is to put a
   breakpoint where IP points and let the process continue.  After
   this the breakpoint can be retracted and the process detached.  */
static void
ugly_workaround(struct process *proc)
{
	arch_addr_t ip = get_instruction_pointer(proc);
	struct breakpoint *found;
	if (DICT_FIND_VAL(proc->leader->breakpoints, &ip, &found) < 0) {
		insert_breakpoint_at(proc, ip, NULL);
	} else {
		assert(found != NULL);
		enable_breakpoint(proc, found);
	}
	ptrace(PTRACE_CONT, proc->pid, 0, 0);
}

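/* All tasks have stopped and the stepped-over breakpoint has been
 * re-enabled.  Continue everyone, or, if we are detaching on exit,
 * switch to the ugly workaround above.  */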
static void
process_stopping_done(struct process_stopping_handler *self,
		      struct process *leader)
{
	debug(DEBUG_PROCESS, "process stopping done %d",
	      self->task_enabling_breakpoint->pid);

	if (!self->exiting) {
		size_t i;
		for (i = 0; i < self->pids.count; ++i)
			if (self->pids.tasks[i].pid != 0
			    && (self->pids.tasks[i].delivered
				|| self->pids.tasks[i].sysret))
				continue_process(self->pids.tasks[i].pid);
		continue_process(self->task_enabling_breakpoint->pid);
	}

	if (self->exiting) {
	ugly_workaround:
		self->state = PSH_UGLY_WORKAROUND;
		ugly_workaround(self->task_enabling_breakpoint);
	} else {
		switch ((self->ugly_workaround_p)(self)) {
		case CBS_FAIL:
			/* xxx handle me */
		case CBS_STOP:
			break;
		case CBS_CONT:
			goto ugly_workaround;
		}
		destroy_event_handler(leader);
	}
}

/* Before we detach, we need to make sure that task's IP is on the
 * edge of an instruction.  So for tasks that have a breakpoint event
 * in the queue, we adjust the instruction pointer, just like
 * continue_after_breakpoint does.  */
static enum ecb_status
undo_breakpoint(Event *event, void *data)
{
	if (event != NULL
	    && event->proc->leader == data
	    && event->type == EVENT_BREAKPOINT)
		set_instruction_pointer(event->proc, event->e_un.brk_addr);
	return ECB_CONT;
}

static enum callback_status
untrace_task(struct process *task, void *data)
{
	if (task != data)
		untrace_pid(task->pid);
	return CBS_CONT;
}

static enum callback_status
remove_task(struct process *task, void *data)
{
	/* Don't untrace leader just yet.  */
	if (task != data)
		remove_process(task);
	return CBS_CONT;
}

static enum callback_status
retract_breakpoint_cb(struct process *proc, struct breakpoint *bp, void *data)
{
	breakpoint_on_retract(bp, proc);
	return CBS_CONT;
}

static void
detach_process(struct process *leader)
{
	each_qd_event(&undo_breakpoint, leader);
	disable_all_breakpoints(leader);
	proc_each_breakpoint(leader, NULL, retract_breakpoint_cb, NULL);

	/* Now untrace the process, if it was attached to by -p.  */
	struct opt_p_t *it;
	for (it = opt_p; it != NULL; it = it->next) {
		struct process *proc = pid2proc(it->pid);
		if (proc == NULL)
			continue;
		if (proc->leader == leader) {
			each_task(leader, NULL, &untrace_task, NULL);
			break;
		}
	}
	each_task(leader, NULL, &remove_task, leader);
	destroy_event_handler(leader);
	remove_task(leader, NULL);
}

static void
handle_stopping_event(struct pid_task *task_info, Event **eventp)
{
	/* Mark all events, so that we know whom to SIGCONT later.  */
	if (task_info != NULL)
		task_info->got_event = 1;

	Event *event = *eventp;

	/* In every state, sink SIGSTOP events for the tasks that we
	 * sent the signal to.  */
	if (task_info != NULL
	    && event->type == EVENT_SIGNAL
	    && event->e_un.signum == SIGSTOP) {
		debug(DEBUG_PROCESS, "SIGSTOP delivered to %d", task_info->pid);
		if (task_info->sigstopped
		    && !task_info->delivered) {
			task_info->delivered = 1;
			*eventp = NULL; // sink the event
		} else
			fprintf(stderr, "suspicious: %d got SIGSTOP, but "
				"sigstopped=%d and delivered=%d\n",
				task_info->pid, task_info->sigstopped,
				task_info->delivered);
	}
}

/* Some SIGSTOPs may not have been delivered to their respective tasks
 * yet.  They are still in the queue.  If we have seen an event for
 * that process, continue it, so that the SIGSTOP can be delivered and
 * caught by ltrace.  We don't mind that the process is after
 * breakpoint (and therefore potentially doesn't have aligned IP),
 * because the signal will be delivered without the process actually
 * starting.  */
static void
continue_for_sigstop_delivery(struct pid_set *pids)
{
	size_t i;
	for (i = 0; i < pids->count; ++i) {
		if (pids->tasks[i].pid != 0
		    && pids->tasks[i].sigstopped
		    && !pids->tasks[i].delivered
		    && pids->tasks[i].got_event) {
			debug(DEBUG_PROCESS, "continue %d for SIGSTOP delivery",
			      pids->tasks[i].pid);
			ptrace(PTRACE_SYSCALL, pids->tasks[i].pid, 0, 0);
		}
	}
}

static int
event_exit_p(Event *event)
{
	return event != NULL && (event->type == EVENT_EXIT
				 || event->type == EVENT_EXIT_SIGNAL);
}

static int
event_exit_or_none_p(Event *event)
{
	return event == NULL || event_exit_p(event)
		|| event->type == EVENT_NONE;
}

static int
await_sigstop_delivery(struct pid_set *pids, struct pid_task *task_info,
		       Event *event)
{
	/* If we still didn't get our SIGSTOP, continue the process
	 * and carry on.  */
	if (event != NULL && !event_exit_or_none_p(event)
	    && task_info != NULL && task_info->sigstopped) {
		debug(DEBUG_PROCESS, "continue %d for SIGSTOP delivery",
		      task_info->pid);
		/* We should get the signal the first thing
		 * after this, so it should be OK to continue
		 * even if we are over a breakpoint.  */
		ptrace(PTRACE_SYSCALL, task_info->pid, 0, 0);

	} else {
		/* If all SIGSTOPs were delivered, uninstall the
		 * handler and continue everyone.  */
		/* XXX I suspect that we should check tasks that are
		 * still around.  As things are now, there could be a
		 * race between waiting for everyone to stop and one
		 * of the tasks exiting.  */
		int all_clear = 1;
		size_t i;
		for (i = 0; i < pids->count; ++i)
			if (pids->tasks[i].pid != 0
			    && pids->tasks[i].sigstopped
			    && !pids->tasks[i].delivered) {
				all_clear = 0;
				break;
			}
		return all_clear;
	}

	return 0;
}

static int
all_stops_accountable(struct pid_set *pids)
{
	size_t i;
	for (i = 0; i < pids->count; ++i)
		if (pids->tasks[i].pid != 0
		    && !pids->tasks[i].got_event
		    && !have_events_for(pids->tasks[i].pid))
			return 0;
	return 1;
}

#ifndef ARCH_HAVE_SW_SINGLESTEP
enum sw_singlestep_status
arch_sw_singlestep(struct process *proc, struct breakpoint *bp,
		   int (*add_cb)(arch_addr_t, struct sw_singlestep_data *),
		   struct sw_singlestep_data *data)
{
	return SWS_HW;
}
#endif

static Event *process_stopping_on_event(struct event_handler *super,
					Event *event);

static void
remove_sw_breakpoints(struct process *proc)
{
	struct process_stopping_handler *self
		= (void *)proc->leader->event_handler;
	assert(self != NULL);
	assert(self->super.on_event == process_stopping_on_event);

	int ct = sizeof(self->sws_bps) / sizeof(*self->sws_bps);
	int i;
	for (i = 0; i < ct; ++i)
		if (self->sws_bps[i] != NULL) {
			delete_breakpoint_at(proc, self->sws_bps[i]->addr);
			self->sws_bps[i] = NULL;
		}
}

static void
sw_singlestep_bp_on_hit(struct breakpoint *bp, struct process *proc)
{
	remove_sw_breakpoints(proc);
}

struct sw_singlestep_data {
	struct process_stopping_handler *self;
};

static int
sw_singlestep_add_bp(arch_addr_t addr, struct sw_singlestep_data *data)
{
	struct process_stopping_handler *self = data->self;
	struct process *proc = self->task_enabling_breakpoint;

	int ct = sizeof(self->sws_bps) / sizeof(*self->sws_bps);
	int i;
	for (i = 0; i < ct; ++i)
		if (self->sws_bps[i] == NULL) {
			static struct bp_callbacks cbs = {
				.on_hit = sw_singlestep_bp_on_hit,
			};
			struct breakpoint *bp
				= insert_breakpoint_at(proc, addr, NULL);
			breakpoint_set_callbacks(bp, &cbs);
			self->sws_bps[i] = bp;
			return 0;
		}

	assert(!"Too many sw singlestep breakpoints!");
	abort();
}

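/* Step over the breakpoint that is being re-enabled.  Give the
 * architecture a chance to do it in software first, by planting
 * breakpoints via arch_sw_singlestep, and fall back to
 * PTRACE_SINGLESTEP.  Returns 0 on success, -1 on failure.  */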
static int
singlestep(struct process_stopping_handler *self)
{
	size_t i;
	for (i = 0; i < sizeof(self->sws_bps) / sizeof(*self->sws_bps); ++i)
		self->sws_bps[i] = NULL;

	struct sw_singlestep_data data = { self };
	switch (arch_sw_singlestep(self->task_enabling_breakpoint,
				   self->breakpoint_being_enabled,
				   &sw_singlestep_add_bp, &data)) {
	case SWS_HW:
		/* Otherwise do the default action: singlestep.  */
		debug(1, "PTRACE_SINGLESTEP");
		if (ptrace(PTRACE_SINGLESTEP,
			   self->task_enabling_breakpoint->pid, 0, 0)) {
			perror("PTRACE_SINGLESTEP");
			return -1;
		}
		return 0;

	case SWS_OK:
		return 0;

	case SWS_FAIL:
		return -1;
	}
	abort();
}

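/* The singlestep is over.  Sink the breakpoint event, remove any
 * software-singlestep breakpoints, and let pending SIGSTOPs be
 * delivered.  */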
static void
post_singlestep(struct process_stopping_handler *self,
		struct Event **eventp)
{
	continue_for_sigstop_delivery(&self->pids);

	if (*eventp != NULL && (*eventp)->type == EVENT_BREAKPOINT)
		*eventp = NULL; // handled

	struct process *proc = self->task_enabling_breakpoint;

	remove_sw_breakpoints(proc);
	self->breakpoint_being_enabled = NULL;
}

static void
singlestep_error(struct process_stopping_handler *self)
{
	struct process *teb = self->task_enabling_breakpoint;
	struct breakpoint *sbp = self->breakpoint_being_enabled;
	fprintf(stderr, "%d couldn't continue when handling %s (%p) at %p\n",
		teb->pid, breakpoint_name(sbp), sbp->addr,
		get_instruction_pointer(teb));
	delete_breakpoint_at(teb->leader, sbp->addr);
}

static void
pt_continue(struct process_stopping_handler *self)
{
	struct process *teb = self->task_enabling_breakpoint;
	debug(1, "PTRACE_CONT");
	ptrace(PTRACE_CONT, teb->pid, 0, 0);
}

static void
pt_singlestep(struct process_stopping_handler *self)
{
	if (singlestep(self) < 0)
		singlestep_error(self);
}

static void
disable_and(struct process_stopping_handler *self,
	    void (*do_this)(struct process_stopping_handler *self))
{
	struct process *teb = self->task_enabling_breakpoint;
	debug(DEBUG_PROCESS, "all stopped, now singlestep/cont %d", teb->pid);
	if (self->breakpoint_being_enabled->enabled)
		disable_breakpoint(teb, self->breakpoint_being_enabled);
	(do_this)(self);
	self->state = PSH_SINGLESTEP;
}

void
linux_ptrace_disable_and_singlestep(struct process_stopping_handler *self)
{
	disable_and(self, &pt_singlestep);
}

void
linux_ptrace_disable_and_continue(struct process_stopping_handler *self)
{
	disable_and(self, &pt_continue);
}

/* This event handler is installed when we are in the process of
 * stopping the whole thread group to do the breakpoint re-enablement
 * for one of the threads.  We pump all events to the queue for later
 * processing while we wait for all the threads to stop.  When this
 * happens, we let the re-enablement thread PTRACE_SINGLESTEP,
 * re-enable, and continue everyone.  */
static Event *
process_stopping_on_event(struct event_handler *super, Event *event)
{
	struct process_stopping_handler *self = (void *)super;
	struct process *task = event->proc;
	struct process *leader = task->leader;
	struct process *teb = self->task_enabling_breakpoint;

	debug(DEBUG_PROCESS,
	      "process_stopping_on_event: pid %d; event type %d; state %d",
	      task->pid, event->type, self->state);

	struct pid_task *task_info = get_task_info(&self->pids, task->pid);
	if (task_info == NULL)
		fprintf(stderr, "new task??? %d\n", task->pid);
	handle_stopping_event(task_info, &event);

	int state = self->state;
	int event_to_queue = !event_exit_or_none_p(event);

	/* Deactivate the entry if the task exits.  */
	if (event_exit_p(event) && task_info != NULL)
		task_info->pid = 0;

	/* Always handle sysrets.  Whether sysret occurred and what
	 * sys it rets from may need to be determined based on process
	 * stack, so we need to keep that in sync with reality.  Note
	 * that we don't continue the process after the sysret is
	 * handled.  See continue_after_syscall.  */
	if (event != NULL && event->type == EVENT_SYSRET) {
		debug(1, "%d LT_EV_SYSRET", event->proc->pid);
		event_to_queue = 0;
		if (task_info != NULL)
			task_info->sysret = 1;
	}

	switch (state) {
	case PSH_STOPPING:
		/* If everyone is stopped, singlestep.  */
		if (each_task(leader, NULL, &task_blocked,
			      &self->pids) == NULL) {
			(self->on_all_stopped)(self);
			state = self->state;
		}
		break;

	case PSH_SINGLESTEP:
		/* In singlestep state, breakpoint signifies that we
		 * have now stepped, and can re-enable the breakpoint.  */
		if (event != NULL && task == teb) {

			/* If this was caused by a real breakpoint, as
			 * opposed to a singlestep, assume that it's
			 * an artificial breakpoint installed for some
			 * reason for the re-enablement.  In that case
			 * handle it.  */
			if (event->type == EVENT_BREAKPOINT) {
				arch_addr_t ip
					= get_instruction_pointer(task);
				struct breakpoint *other
					= address2bpstruct(leader, ip);
				if (other != NULL)
					breakpoint_on_hit(other, task);
			}

			/* If we got SIGNAL instead of BREAKPOINT,
			 * then this is not singlestep at all.  */
			if (event->type == EVENT_SIGNAL) {
			do_singlestep:
				if (singlestep(self) < 0) {
					singlestep_error(self);
					post_singlestep(self, &event);
					goto psh_sinking;
				}
				break;
			} else {
				switch ((self->keep_stepping_p)(self)) {
				case CBS_FAIL:
					/* XXX handle me */
				case CBS_STOP:
					break;
				case CBS_CONT:
					/* Sink singlestep event.  */
					if (event->type == EVENT_BREAKPOINT)
						event = NULL;
					goto do_singlestep;
				}
			}

			/* Re-enable the breakpoint that we are
			 * stepping over.  */
			struct breakpoint *sbp = self->breakpoint_being_enabled;
			if (sbp->enabled)
				enable_breakpoint(teb, sbp);

			post_singlestep(self, &event);
			goto psh_sinking;
		}
		break;

	psh_sinking:
		state = self->state = PSH_SINKING;
		/* Fall through.  */
	case PSH_SINKING:
		if (await_sigstop_delivery(&self->pids, task_info, event))
			process_stopping_done(self, leader);
		break;

	case PSH_UGLY_WORKAROUND:
		if (event == NULL)
			break;
		if (event->type == EVENT_BREAKPOINT) {
			undo_breakpoint(event, leader);
			if (task == teb)
				self->task_enabling_breakpoint = NULL;
		}
		if (self->task_enabling_breakpoint == NULL
		    && all_stops_accountable(&self->pids)) {
			undo_breakpoint(event, leader);
			detach_process(leader);
			event = NULL; // handled
		}
	}

	if (event != NULL && event_to_queue) {
		enque_event(event);
		event = NULL; // sink the event
	}

	return event;
}

static void
process_stopping_destroy(struct event_handler *super)
{
	struct process_stopping_handler *self = (void *)super;
	free(self->pids.tasks);
}

static enum callback_status
no(struct process_stopping_handler *self)
{
	return CBS_STOP;
}

int
process_install_stopping_handler(struct process *proc, struct breakpoint *sbp,
				 void (*as)(struct process_stopping_handler *),
				 enum callback_status (*ks)
					 (struct process_stopping_handler *),
				 enum callback_status (*uw)
					(struct process_stopping_handler *))
{
	debug(DEBUG_FUNCTION,
	      "process_install_stopping_handler: pid=%d", proc->pid);

	struct process_stopping_handler *handler = calloc(1, sizeof(*handler));
	if (handler == NULL)
		return -1;

	if (as == NULL)
		as = &linux_ptrace_disable_and_singlestep;
	if (ks == NULL)
		ks = &no;
	if (uw == NULL)
		uw = &no;

	handler->super.on_event = process_stopping_on_event;
	handler->super.destroy = process_stopping_destroy;
	handler->task_enabling_breakpoint = proc;
	handler->breakpoint_being_enabled = sbp;
	handler->on_all_stopped = as;
	handler->keep_stepping_p = ks;
	handler->ugly_workaround_p = uw;

	install_event_handler(proc->leader, &handler->super);

	if (each_task(proc->leader, NULL, &send_sigstop,
		      &handler->pids) != NULL) {
		destroy_event_handler(proc);
		return -1;
	}

	/* And deliver the first fake event, in case all the
	 * conditions are already fulfilled.  */
	Event ev = {
		.type = EVENT_NONE,
		.proc = proc,
	};
	process_stopping_on_event(&handler->super, &ev);

	return 0;
}

void
continue_after_breakpoint(struct process *proc, struct breakpoint *sbp)
{
	debug(DEBUG_PROCESS,
	      "continue_after_breakpoint: pid=%d, addr=%p",
	      proc->pid, sbp->addr);

	set_instruction_pointer(proc, sbp->addr);

	if (sbp->enabled == 0) {
		continue_process(proc->pid);
	} else if (process_install_stopping_handler
			(proc, sbp, NULL, NULL, NULL) < 0) {
		perror("process_stopping_handler_create");
		/* Carry on not bothering to re-enable.  */
		continue_process(proc->pid);
	}
}

/**
 * Ltrace exit.  When we are about to exit, we have to go through all
 * the processes, stop them all, remove all the breakpoints, and then
 * detach from the processes that we attached to using -p.  If we left
 * the other tasks running, they might hit stray return breakpoints
 * and produce artifacts, so we'd better stop everyone, even if it's a
 * bit of extra work.
 */
struct ltrace_exiting_handler
{
	struct event_handler super;
	struct pid_set pids;
};

static Event *
ltrace_exiting_on_event(struct event_handler *super, Event *event)
{
	struct ltrace_exiting_handler *self = (void *)super;
	struct process *task = event->proc;
	struct process *leader = task->leader;

	debug(DEBUG_PROCESS,
	      "ltrace_exiting_on_event: pid %d; event type %d",
	      task->pid, event->type);

	struct pid_task *task_info = get_task_info(&self->pids, task->pid);
	handle_stopping_event(task_info, &event);

	if (event != NULL && event->type == EVENT_BREAKPOINT)
		undo_breakpoint(event, leader);

	if (await_sigstop_delivery(&self->pids, task_info, event)
	    && all_stops_accountable(&self->pids))
		detach_process(leader);

	/* Sink all non-exit events.  We are about to exit, so we
	 * don't bother with queuing them. */
	if (event_exit_or_none_p(event))
		return event;

	return NULL;
}

static void
ltrace_exiting_destroy(struct event_handler *super)
{
	struct ltrace_exiting_handler *self = (void *)super;
	free(self->pids.tasks);
}

static int
ltrace_exiting_install_handler(struct process *proc)
{
	/* Only install to leader.  */
	if (proc->leader != proc)
		return 0;

	/* Perhaps we are already installed, if the user passed
	 * several -p options that are tasks of one process.  */
	if (proc->event_handler != NULL
	    && proc->event_handler->on_event == &ltrace_exiting_on_event)
		return 0;

	/* If stopping handler is already present, let it do the
	 * work.  */
	if (proc->event_handler != NULL) {
		assert(proc->event_handler->on_event
		       == &process_stopping_on_event);
		struct process_stopping_handler *other
			= (void *)proc->event_handler;
		other->exiting = 1;
		return 0;
	}

	struct ltrace_exiting_handler *handler
		= calloc(1, sizeof(*handler));
	if (handler == NULL) {
		perror("malloc exiting handler");
	fatal:
		/* XXXXXXXXXXXXXXXXXXX fixme */
		return -1;
	}

	handler->super.on_event = ltrace_exiting_on_event;
	handler->super.destroy = ltrace_exiting_destroy;
	install_event_handler(proc->leader, &handler->super);

	if (each_task(proc->leader, NULL, &send_sigstop,
		      &handler->pids) != NULL)
		goto fatal;

	return 0;
}

/*
 * When the traced process vforks, it's suspended until the child
 * process calls _exit or exec*.  In the meantime, the two share the
 * address space.
 *
 * The child process should only ever call _exit or exec*, but we
 * can't count on that (it's not the role of ltrace to police, but to
 * observe).  In any case, we will _at least_ have to deal with
 * removal of vfork return breakpoint (which we have to smuggle back
 * in, so that the parent can see it, too), and introduction of exec*
 * return breakpoint.  Since we already have both breakpoint actions
 * to deal with, we might as well support it all.
 *
 * The gist is that we pretend that the child is in a thread group
 * with its parent, and handle it as a multi-threaded case, with the
 * exception that we know that the parent is blocked, and don't
 * attempt to stop it.  When the child execs, we undo the setup.
 */

struct process_vfork_handler
{
	struct event_handler super;
	int vfork_bp_refd:1;
};

static Event *
process_vfork_on_event(struct event_handler *super, Event *event)
{
	debug(DEBUG_PROCESS,
	      "process_vfork_on_event: pid %d; event type %d",
	      event->proc->pid, event->type);

	struct process_vfork_handler *self = (void *)super;
	struct process *proc = event->proc;
	assert(self != NULL);

	switch (event->type) {
	case EVENT_BREAKPOINT:
		/* We turn on the vfork return breakpoint (which
		 * should be the one that we have tripped over just
		 * now) one extra time, so that the vfork parent hits
		 * it as well.  */
		if (!self->vfork_bp_refd) {
			struct breakpoint *sbp = NULL;
			DICT_FIND_VAL(proc->leader->breakpoints,
				      &event->e_un.brk_addr, &sbp);
			assert(sbp != NULL);
			breakpoint_turn_on(sbp, proc->leader);
			self->vfork_bp_refd = 1;
		}
		break;

	case EVENT_EXIT:
	case EVENT_EXIT_SIGNAL:
	case EVENT_EXEC:
		/* Remove the leader that we artificially set up
		 * earlier.  */
		change_process_leader(proc, proc);
		destroy_event_handler(proc);
		continue_process(proc->parent->pid);

	default:
		;
	}

	return event;
}

void
continue_after_vfork(struct process *proc)
{
	debug(DEBUG_PROCESS, "continue_after_vfork: pid=%d", proc->pid);
	struct process_vfork_handler *handler = calloc(1, sizeof(*handler));
	if (handler == NULL) {
		perror("malloc vfork handler");
		/* Carry on without the special vfork handling that
		 * the process would need.  */
		continue_process(proc->parent->pid);
		return;
	}

	/* We must set up custom event handler, so that we see
	 * exec/exit events for the task itself.  */
	handler->super.on_event = process_vfork_on_event;
	install_event_handler(proc, &handler->super);

	/* Make sure that the child is sole thread.  */
	assert(proc->leader == proc);
	assert(proc->next == NULL || proc->next->leader != proc);

	/* Make sure that the child's parent is properly set up.  */
	assert(proc->parent != NULL);
	assert(proc->parent->leader != NULL);

	change_process_leader(proc, proc->parent->leader);
}

static int
is_mid_stopping(struct process *proc)
{
	return proc != NULL
		&& proc->event_handler != NULL
		&& proc->event_handler->on_event == &process_stopping_on_event;
}

void
continue_after_syscall(struct process *proc, int sysnum, int ret_p)
{
	/* Don't continue if we are mid-stopping.  */
	if (ret_p && (is_mid_stopping(proc) || is_mid_stopping(proc->leader))) {
		debug(DEBUG_PROCESS,
		      "continue_after_syscall: don't continue %d",
		      proc->pid);
		return;
	}
	continue_process(proc->pid);
}

void
continue_after_exec(struct process *proc)
{
	continue_process(proc->pid);

	/* After the exec, we expect to hit the first executable
	 * instruction.
	 *
	 * XXX TODO It would be nice to have this removed, but then we
	 * need to do that also for initial call to wait_for_proc in
	 * execute_program.  In that case we could generate a
	 * EVENT_FIRST event or something, or maybe this could somehow
	 * be rolled into EVENT_NEW.  */
	wait_for_proc(proc->pid);
	continue_process(proc->pid);
}

/* If ltrace gets SIGINT, the processes directly or indirectly run by
 * ltrace get it too.  We just have to wait long enough for the signal
 * to be delivered and the process terminated, which we notice and
 * exit ltrace, too.  So there's not much we need to do there.  We
 * want to keep tracing those processes as usual, in case they just
 * SIG_IGN the SIGINT to do their shutdown etc.
 *
 * For processes run in the background, we want to install an exit
 * handler that stops all the threads, removes all breakpoints, and
 * detaches.
 */
void
os_ltrace_exiting(void)
{
	struct opt_p_t *it;
	for (it = opt_p; it != NULL; it = it->next) {
		struct process *proc = pid2proc(it->pid);
		if (proc == NULL || proc->leader == NULL)
			continue;
		if (ltrace_exiting_install_handler(proc->leader) < 0)
			fprintf(stderr,
				"Couldn't install exiting handler for %d.\n",
				proc->pid);
	}
}

int
os_ltrace_exiting_sighandler(void)
{
	extern int linux_in_waitpid;
	if (linux_in_waitpid) {
		os_ltrace_exiting();
		return 1;
	}
	return 0;
}

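/* Copy LEN bytes of the traced process's memory at ADDR into BUF,
 * one word at a time via PTRACE_PEEKTEXT.  Returns the number of
 * bytes actually read, which may be less than LEN if the accessible
 * mapping ends early, or (size_t)-1 on error.  */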
size_t
umovebytes(struct process *proc, arch_addr_t addr, void *buf, size_t len)
{
	union {
		long a;
		char c[sizeof(long)];
	} a;
	int started = 0;
	size_t offset = 0, bytes_read = 0;

	while (offset < len) {
		a.a = ptrace(PTRACE_PEEKTEXT, proc->pid, addr + offset, 0);
		if (a.a == -1 && errno) {
			if (started && errno == EIO)
				return bytes_read;
			else
				return -1;
		}
		started = 1;

		if (len - offset >= sizeof(long)) {
			memcpy(buf + offset, &a.c[0], sizeof(long));
			bytes_read += sizeof(long);
		}
		else {
			memcpy(buf + offset, &a.c[0], len - offset);
			bytes_read += (len - offset);
		}
		offset += sizeof(long);
	}

	return bytes_read;
}

struct irelative_name_data_t {
	GElf_Addr addr;
	const char *found_name;
};

static enum callback_status
irelative_name_cb(GElf_Sym *symbol, const char *name, void *d)
{
	struct irelative_name_data_t *data = d;

	if (symbol->st_value == data->addr) {
		bool is_ifunc = false;
#ifdef STT_GNU_IFUNC
		is_ifunc = GELF_ST_TYPE(symbol->st_info) == STT_GNU_IFUNC;
#endif
		data->found_name = name;

		/* Keep looking, unless we found the actual IFUNC
		 * symbol.  What we matched may have been a symbol
		 * denoting the resolver function, which would have
		 * the same address.  */
		return CBS_STOP_IF(is_ifunc);
	}

	return CBS_CONT;
}

char *
linux_elf_find_irelative_name(struct ltelf *lte, GElf_Addr addr)
{
	struct irelative_name_data_t data = { addr, NULL };
	if (addr != 0
	    && elf_each_symbol(lte, 0,
			       irelative_name_cb, &data).status < 0)
		return NULL;

	const char *name;
	if (data.found_name != NULL) {
		name = data.found_name;
	} else {
#define NAME "IREL."
		/* NAME\0 + 0x + digits.  */
		char *tmp_name = alloca(sizeof NAME + 2 + 16);
		sprintf(tmp_name, NAME "%#" PRIx64, (uint64_t) addr);
		name = tmp_name;
#undef NAME
	}

	return strdup(name);
}

enum plt_status
linux_elf_add_plt_entry_irelative(struct process *proc, struct ltelf *lte,
				  GElf_Rela *rela, size_t ndx,
				  struct library_symbol **ret)
{
	char *name = linux_elf_find_irelative_name(lte, rela->r_addend);
	int i = default_elf_add_plt_entry(proc, lte, name, rela, ndx, ret);
	free(name);
	return i < 0 ? PLT_FAIL : PLT_OK;
}

struct prototype *
linux_IFUNC_prototype(void)
{
	static struct prototype ret;
	if (ret.return_info == NULL) {
		prototype_init(&ret);
		ret.return_info = type_get_voidptr();
		ret.own_return_info = 0;
	}
	return &ret;
}

int
os_library_symbol_init(struct library_symbol *libsym)
{
	libsym->os = (struct os_library_symbol_data){};
	return 0;
}

void
os_library_symbol_destroy(struct library_symbol *libsym)
{
}

int
os_library_symbol_clone(struct library_symbol *retp,
			struct library_symbol *libsym)
{
	retp->os = libsym->os;
	return 0;
}

char *
linux_append_IFUNC_to_name(const char *name)
{
#define S ".IFUNC"
	char *tmp_name = malloc(strlen(name) + sizeof S);
	if (tmp_name == NULL)
		return NULL;
	sprintf(tmp_name, "%s%s", name, S);
#undef S
	return tmp_name;
}

enum plt_status
os_elf_add_func_entry(struct process *proc, struct ltelf *lte,
		      const GElf_Sym *sym,
		      arch_addr_t addr, const char *name,
		      struct library_symbol **ret)
{
	if (GELF_ST_TYPE(sym->st_info) == STT_FUNC)
		return PLT_DEFAULT;

	bool ifunc = false;
#ifdef STT_GNU_IFUNC
	ifunc = GELF_ST_TYPE(sym->st_info) == STT_GNU_IFUNC;
#endif

	if (ifunc) {
		char *tmp_name = linux_append_IFUNC_to_name(name);
		struct library_symbol *tmp = malloc(sizeof *tmp);
		if (tmp_name == NULL || tmp == NULL) {
		fail:
			free(tmp_name);
			free(tmp);
			return PLT_FAIL;
		}

		if (library_symbol_init(tmp, addr, tmp_name, 1,
					LS_TOPLT_NONE) < 0)
			goto fail;
		tmp->proto = linux_IFUNC_prototype();
		tmp->os.is_ifunc = 1;

		*ret = tmp;
		return PLT_OK;
	}

	*ret = NULL;
	return PLT_OK;
}

static enum callback_status
libsym_at_address(struct library_symbol *libsym, void *addrp)
{
	arch_addr_t addr = *(arch_addr_t *)addrp;
	return CBS_STOP_IF(addr == libsym->enter_addr);
}

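/* Called when the return breakpoint of an IFUNC resolver is hit.
 * Fetch the resolver's return value, which is the address of the
 * actual implementation, translate it, and plant a breakpoint there,
 * creating a library symbol for it if one doesn't exist yet.  */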
static void
ifunc_ret_hit(struct breakpoint *bp, struct process *proc)
{
	struct fetch_context *fetch = fetch_arg_init(LT_TOF_FUNCTION, proc,
						     type_get_voidptr());
	if (fetch == NULL)
		return;

	struct breakpoint *nbp = NULL;
	int own_libsym = 0;
	struct library_symbol *libsym = NULL;

	struct value value;
	value_init(&value, proc, NULL, type_get_voidptr(), 0);
	size_t sz = value_size(&value, NULL);
	union {
		uint64_t u64;
		uint32_t u32;
		arch_addr_t a;
	} u;

	if (fetch_retval(fetch, LT_TOF_FUNCTIONR, proc,
			 value.type, &value) < 0
	    || sz > 8 /* Captures failure as well.  */
	    || value_extract_buf(&value, (void *) &u, NULL) < 0) {
	fail:
		fprintf(stderr,
			"Couldn't trace the function "
			"indicated by IFUNC resolver.\n");
		goto done;
	}

	assert(sz == 4 || sz == 8);
	/* XXX double casts below:  */
	if (sz == 4)
		u.a = (arch_addr_t)(uintptr_t)u.u32;
	else
		u.a = (arch_addr_t)(uintptr_t)u.u64;
	if (arch_translate_address_dyn(proc, u.a, &u.a) < 0) {
		fprintf(stderr, "Couldn't OPD-translate the address returned"
			" by the IFUNC resolver.\n");
		goto done;
	}

	assert(bp->os.ret_libsym != NULL);

	struct library *lib = bp->os.ret_libsym->lib;
	assert(lib != NULL);

	/* Look if we already have a symbol with this address.
	 * Otherwise create a new one.  */
	libsym = library_each_symbol(lib, NULL, libsym_at_address, &u.a);
	if (libsym == NULL) {
		libsym = malloc(sizeof *libsym);
		char *name = strdup(bp->os.ret_libsym->name);

		if (libsym == NULL
		    || name == NULL
		    || library_symbol_init(libsym, u.a, name, 1,
					   LS_TOPLT_NONE) < 0) {
			free(libsym);
			free(name);
			goto fail;
		}

		/* Snip the .IFUNC token.  */
		*strrchr(name, '.') = 0;

		own_libsym = 1;
		library_add_symbol(lib, libsym);
	}

	nbp = malloc(sizeof *bp);
	if (nbp == NULL || breakpoint_init(nbp, proc, u.a, libsym) < 0)
		goto fail;

	/* If there already is a breakpoint at that address, that is
	 * suspicious, but whatever.  */
	struct breakpoint *pre_bp = insert_breakpoint(proc, nbp);
	if (pre_bp == NULL)
		goto fail;
	if (pre_bp == nbp) {
		/* PROC took our breakpoint, so these resources are
		 * not ours anymore.  */
		nbp = NULL;
		own_libsym = 0;
	}

done:
	free(nbp);
	if (own_libsym) {
		library_symbol_destroy(libsym);
		free(libsym);
	}
	fetch_arg_done(fetch);
}

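/* The return breakpoint of an IFUNC resolver gets the ifunc_ret_hit
 * callback, and remembers which symbol it returns from, so that the
 * callback can name the newly discovered implementation.  */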
static int
create_ifunc_ret_bp(struct breakpoint **ret,
		    struct breakpoint *bp, struct process *proc)
{
	*ret = create_default_return_bp(proc);
	if (*ret == NULL)
		return -1;
	static struct bp_callbacks cbs = {
		.on_hit = ifunc_ret_hit,
	};
	breakpoint_set_callbacks(*ret, &cbs);

	(*ret)->os.ret_libsym = bp->libsym;

	return 0;
}

int
os_breakpoint_init(struct process *proc, struct breakpoint *bp)
{
	if (bp->libsym != NULL && bp->libsym->os.is_ifunc) {
		static struct bp_callbacks cbs = {
			.get_return_bp = create_ifunc_ret_bp,
		};
		breakpoint_set_callbacks(bp, &cbs);
	}
	return 0;
}

void
os_breakpoint_destroy(struct breakpoint *bp)
{
}

int
os_breakpoint_clone(struct breakpoint *retp, struct breakpoint *bp)
{
	return 0;
}