1 /* Target operations for the remote server for GDB.
2 Copyright (C) 2002, 2004, 2005, 2011
3 Free Software Foundation, Inc.
4
5 Contributed by MontaVista Software.
6
7 This file is part of GDB.
8 It has been modified to integrate it in valgrind
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor,
23 Boston, MA 02110-1301, USA. */
24
25 #include "server.h"
26 #include "target.h"
27 #include "regdef.h"
28 #include "regcache.h"
29 #include "valgrind_low.h"
30 #include "gdb/signals.h"
31 #include "pub_core_aspacemgr.h"
32 #include "pub_core_machine.h"
33 #include "pub_core_threadstate.h"
34 #include "pub_core_transtab.h"
35 #include "pub_core_gdbserver.h"
36 #include "pub_core_debuginfo.h"
37
38
/* the_low_target defines the architecture specific aspects depending
   on the cpu.  It is filled in once at startup by
   valgrind_initialize_target. */
static struct valgrind_target_ops the_low_target;
42
43 static
image_ptid(unsigned long ptid)44 char *image_ptid(unsigned long ptid)
45 {
46 static char result[50]; // large enough
47 VG_(sprintf) (result, "id %ld", ptid);
48 return result;
49 }
50 #define get_thread(inf) ((struct thread_info *)(inf))
51 static
remove_thread_if_not_in_vg_threads(struct inferior_list_entry * inf)52 void remove_thread_if_not_in_vg_threads (struct inferior_list_entry *inf)
53 {
54 struct thread_info *thread = get_thread (inf);
55 if (!VG_(lwpid_to_vgtid)(thread_to_gdb_id(thread))) {
56 dlog(1, "removing gdb ptid %s\n",
57 image_ptid(thread_to_gdb_id(thread)));
58 remove_thread (thread);
59 }
60 }
61
/* synchronize threads known by valgrind and threads known by gdbserver.
   PID is currently unused (the process id is implicit: there is only
   one inferior process). */
static
void valgrind_update_threads (int pid)
{
   ThreadId tid;
   ThreadState *ts;
   unsigned long ptid;
   struct thread_info *ti;

   /* call remove_thread for all gdb threads not in valgrind threads */
   for_each_inferior (&all_threads, remove_thread_if_not_in_vg_threads);

   /* call add_thread for all valgrind threads not known in gdb all_threads */
   for (tid = 1; tid < VG_N_THREADS; tid++) {

/* Common trailing dlog arguments; only valid once ti, ts and ptid have
   been set for this tid (hence defined and undefined inside the loop). */
#define LOCAL_THREAD_TRACE " ti* %p vgtid %d status %s as gdb ptid %s lwpid %d\n", \
        ti, tid, VG_(name_of_ThreadStatus) (ts->status), \
        image_ptid (ptid), ts->os_state.lwpid

      if (VG_(is_valid_tid) (tid)) {
         ts = VG_(get_ThreadState) (tid);
         /* gdbserver identifies threads by their lwpid. */
         ptid = ts->os_state.lwpid;
         ti = gdb_id_to_thread (ptid);
         if (!ti) {
            /* we do not report the threads which are not yet fully
               initialized otherwise this creates duplicated threads
               in gdb: once with pid xxx lwpid 0, then after that
               with pid xxx lwpid yyy. */
            if (ts->status != VgTs_Init) {
               dlog(1, "adding_thread" LOCAL_THREAD_TRACE);
               add_thread (ptid, ts, ptid);
            }
         } else {
            dlog(2, "(known thread)" LOCAL_THREAD_TRACE);
         }
      }
#undef LOCAL_THREAD_TRACE
   }
}
101
102 static
build_shadow_arch(struct reg * reg_defs,int n)103 struct reg* build_shadow_arch (struct reg *reg_defs, int n) {
104 int i, r;
105 static const char *postfix[3] = { "", "s1", "s2" };
106 struct reg *new_regs = malloc(3 * n * sizeof(reg_defs[0]));
107 int reg_set_len = reg_defs[n-1].offset + reg_defs[n-1].size;
108
109 for (i = 0; i < 3; i++) {
110 for (r = 0; r < n; r++) {
111 char *regname = malloc(strlen(reg_defs[r].name)
112 + strlen (postfix[i]) + 1);
113 strcpy (regname, reg_defs[r].name);
114 strcat (regname, postfix[i]);
115 new_regs[i*n + r].name = regname;
116 new_regs[i*n + r].offset = i*reg_set_len + reg_defs[r].offset;
117 new_regs[i*n + r].size = reg_defs[r].size;
118 dlog(1,
119 "%-10s Nr %d offset(bit) %d offset(byte) %d size(bit) %d\n",
120 new_regs[i*n + r].name, i*n + r, new_regs[i*n + r].offset,
121 (new_regs[i*n + r].offset) / 8, new_regs[i*n + r].size);
122 }
123 }
124
125 return new_regs;
126 }
127
128
/* Data address that caused the last watchpoint stop; 0 = no
   watchpoint hit. */
static CORE_ADDR stopped_data_address = 0;
/* Called by the valgrind core when a watchpoint fires, to record the
   triggering data address (call with 0 to clear). */
void VG_(set_watchpoint_stop_address) (Addr addr)
{
   stopped_data_address = addr;
}
134
valgrind_stopped_by_watchpoint(void)135 int valgrind_stopped_by_watchpoint (void)
136 {
137 return stopped_data_address != 0;
138 }
139
/* Returns the data address that triggered the last watchpoint stop
   (0 if the last stop was not a watchpoint hit). */
CORE_ADDR valgrind_stopped_data_address (void)
{
   return stopped_data_address;
}
144
/* pc at which we last stopped */
static CORE_ADDR stop_pc;

/* pc at which we resume.
   If stop_pc != resume_pc, it means
   gdb/gdbserver has changed the pc so as to have either
   a "continue by jumping at that address"
   or a "continue at that address to call some code from gdb".
*/
static CORE_ADDR resume_pc;

/* Signal encountered by the client, still to be reported to GDB. */
static vki_siginfo_t vki_signal_to_report;
/* Signal (possibly changed by GDB) to deliver to the client on
   resume; si_signo == 0 means "no signal to deliver". */
static vki_siginfo_t vki_signal_to_deliver;
158
gdbserver_signal_encountered(const vki_siginfo_t * info)159 void gdbserver_signal_encountered (const vki_siginfo_t *info)
160 {
161 vki_signal_to_report = *info;
162 vki_signal_to_deliver = *info;
163 }
164
void gdbserver_pending_signal_to_report (vki_siginfo_t *info)
{
   /* Copy out the signal still waiting to be reported to GDB. */
   VG_(memcpy) (info, &vki_signal_to_report, sizeof(*info));
}
169
gdbserver_deliver_signal(vki_siginfo_t * info)170 Bool gdbserver_deliver_signal (vki_siginfo_t *info)
171 {
172 if (info->si_signo != vki_signal_to_deliver.si_signo)
173 dlog(1, "GDB changed signal info %d to_report %d to_deliver %d\n",
174 info->si_signo, vki_signal_to_report.si_signo,
175 vki_signal_to_deliver.si_signo);
176 *info = vki_signal_to_deliver;
177 return vki_signal_to_deliver.si_signo != 0;
178 }
179
/* Pending process-exit notification for valgrind_wait:
   'W' = exited with exit_code_to_report as exit code,
   'X' = killed, exit_code_to_report is the host signal number,
   0   = nothing pending. */
static unsigned char exit_status_to_report;
static int exit_code_to_report;
/* Record a process exit so the next valgrind_wait reports it to GDB. */
void gdbserver_process_exit_encountered (unsigned char status, Int code)
{
   vg_assert (status == 'W' || status == 'X');
   exit_status_to_report = status;
   exit_code_to_report = code;
}
188
/* Returns a description (function name, source position) of ADDR,
   for trace output. */
static
const HChar* sym (Addr addr)
{
   return VG_(describe_IP) (addr, NULL);
}
194
/* Thread on whose behalf vgdb interrupted execution; 0 if none
   (used by valgrind_wait to choose the thread to report). */
ThreadId vgdb_interrupted_tid = 0;

/* 0 => not single stepping.
   1 => single stepping asked by gdb
   2 => single stepping asked by valgrind (watchpoint) */
static int stepping = 0;
201
valgrind_get_ignore_break_once(void)202 Addr valgrind_get_ignore_break_once(void)
203 {
204 if (valgrind_single_stepping())
205 return resume_pc;
206 else
207 return 0;
208 }
209
void valgrind_set_single_stepping(Bool set)
{
   /* Valgrind-requested stepping is encoded as 2, distinguishing it
      from gdb-requested stepping (1) — see the stepping variable. */
   stepping = set ? 2 : 0;
}
217
valgrind_single_stepping(void)218 Bool valgrind_single_stepping(void)
219 {
220 if (stepping)
221 return True;
222 else
223 return False;
224 }
225
valgrind_thread_alive(unsigned long tid)226 int valgrind_thread_alive (unsigned long tid)
227 {
228 struct thread_info *ti = gdb_id_to_thread(tid);
229 ThreadState *tst;
230
231 if (ti != NULL) {
232 tst = (ThreadState *) inferior_target_data (ti);
233 return tst->status != VgTs_Zombie;
234 }
235 else {
236 return 0;
237 }
238 }
239
/* Resume the client as instructed by GDB: RESUME_INFO says whether to
   single step and which signal (0 = none) to deliver on resume. */
void valgrind_resume (struct thread_resume *resume_info)
{
   dlog(1,
        "resume_info step %d sig %d stepping %d\n",
        resume_info->step,
        resume_info->sig,
        stepping);
   /* A watchpoint stop is consumed by the resume: clear it. */
   if (valgrind_stopped_by_watchpoint()) {
      dlog(1, "clearing watchpoint stopped_data_address %p\n",
           C2v(stopped_data_address));
      VG_(set_watchpoint_stop_address) ((Addr) 0);
   }
   vki_signal_to_deliver.si_signo = resume_info->sig;
   /* signal was reported to GDB, GDB told us to resume execution.
      So, reset the signal to report to 0. */
   VG_(memset) (&vki_signal_to_report, 0, sizeof(vki_signal_to_report));

   stepping = resume_info->step;
   /* Remember where we resume: if GDB changed the pc (e.g. for an
      inferior call), a breakpoint at resume_pc must be ignored once —
      see valgrind_get_ignore_break_once. */
   resume_pc = (*the_low_target.get_pc) ();
   if (resume_pc != stop_pc) {
      dlog(1,
           "stop_pc %p changed to be resume_pc %s\n",
           C2v(stop_pc), sym(resume_pc));
   }
   regcache_invalidate();
}
266
/* Wait for the next debug event and report it to GDB.
   Sets *OURSTATUS and returns the associated value:
     'W' : process exited normally; returns the exit code.
     'X' : process killed by a signal; returns the (target) signal.
     'T' : process stopped; returns the signal to report
           (TARGET_SIGNAL_TRAP, i.e. a breakpoint, if none pending). */
unsigned char valgrind_wait (char *ourstatus)
{
   int pid;
   unsigned long wptid;
   ThreadState *tst;
   enum target_signal sig;
   int code;

   pid = VG_(getpid) ();
   dlog(1, "enter valgrind_wait pid %d\n", pid);

   regcache_invalidate();
   /* Resync the gdbserver thread list with valgrind's threads. */
   valgrind_update_threads(pid);

   /* First see if we are done with this process. */
   if (exit_status_to_report != 0) {
      *ourstatus = exit_status_to_report;
      exit_status_to_report = 0;

      if (*ourstatus == 'W') {
         code = exit_code_to_report;
         exit_code_to_report = 0;
         dlog(1, "exit valgrind_wait status W exit code %d\n", code);
         return code;
      }

      if (*ourstatus == 'X') {
         sig = target_signal_from_host(exit_code_to_report);
         exit_code_to_report = 0;
         dlog(1, "exit valgrind_wait status X signal %d\n", sig);
         return sig;
      }
   }

   /* in valgrind, we consider that a wait always succeeds with STOPPED 'T'
      and with a signal TRAP (i.e. a breakpoint), unless there is
      a signal to report. */
   *ourstatus = 'T';
   if (vki_signal_to_report.si_signo == 0)
      sig = TARGET_SIGNAL_TRAP;
   else
      sig = target_signal_from_host(vki_signal_to_report.si_signo);

   /* Report on behalf of the thread vgdb interrupted, if any;
      otherwise the currently running thread. */
   if (vgdb_interrupted_tid != 0)
      tst = VG_(get_ThreadState) (vgdb_interrupted_tid);
   else
      tst = VG_(get_ThreadState) (VG_(running_tid));
   wptid = tst->os_state.lwpid;
   /* we can only change the current_inferior when the wptid references
      an existing thread. Otherwise, we are still in the init phase.
      (hack similar to main thread hack in valgrind_update_threads) */
   if (tst->os_state.lwpid)
      current_inferior = gdb_id_to_thread (wptid);
   stop_pc = (*the_low_target.get_pc) ();

   dlog(1,
        "exit valgrind_wait status T ptid %s stop_pc %s signal %d\n",
        image_ptid (wptid), sym (stop_pc), sig);
   return sig;
}
327
/* Fetch one register from valgrind VEX guest state into the gdbserver
   register cache.  Out-of-range REGNO is logged and ignored. */
static
void fetch_register (int regno)
{
   int size;
   ThreadState *tst = (ThreadState *) inferior_target_data (current_inferior);
   ThreadId tid = tst->tid;

   if (regno >= the_low_target.num_regs) {
      dlog(0, "error fetch_register regno %d max %d\n",
           regno, the_low_target.num_regs);
      return;
   }
   size = register_size (regno);
   if (size > 0) {
      Bool mod;
      char buf [size];
      VG_(memset) (buf, 0, size); // registers not fetched will be seen as 0.
      (*the_low_target.transfer_register) (tid, regno, buf,
                                           valgrind_to_gdbserver, size, &mod);
      // Note: the *mod received from transfer_register is not interesting.
      // We are interested to see if the register data in the register cache is modified.
      supply_register (regno, buf, &mod);
      if (mod && VG_(debugLog_getLevel)() > 1) {
         char bufimage [2*size + 1];
         heximage (bufimage, buf, size);
         dlog(3, "fetched register %d size %d name %s value %s tid %d status %s\n",
              regno, size, the_low_target.reg_defs[regno].name, bufimage,
              tid, VG_(name_of_ThreadStatus) (tst->status));
      }
   }
}
360
361 /* Fetch all registers, or just one, from the child process. */
362 static
usr_fetch_inferior_registers(int regno)363 void usr_fetch_inferior_registers (int regno)
364 {
365 if (regno == -1 || regno == 0)
366 for (regno = 0; regno < the_low_target.num_regs; regno++)
367 fetch_register (regno);
368 else
369 fetch_register (regno);
370 }
371
/* Store our register values back into the inferior.
   If REGNO is -1, do this for all registers.
   Otherwise, REGNO specifies which register (so we can save time). */
static
void usr_store_inferior_registers (int regno)
{
   int size;
   ThreadState *tst = (ThreadState *) inferior_target_data (current_inferior);
   ThreadId tid = tst->tid;

   if (regno >= 0) {

      if (regno >= the_low_target.num_regs) {
         dlog(0, "error store_register regno %d max %d\n",
              regno, the_low_target.num_regs);
         return;
      }

      size = register_size (regno);
      if (size > 0) {
         Bool mod;
         Addr old_SP, new_SP;
         char buf[size];

         if (regno == the_low_target.stack_pointer_regno) {
            /* When the stack pointer register is changed such that
               the stack is extended, we better inform the tool of the
               stack increase. This is needed in particular to avoid
               spurious Memcheck errors during Inferior calls. So, we
               save in old_SP the SP before the change. A change of
               stack pointer is also assumed to have initialised this
               new stack space. For the typical example of an inferior
               call, gdb writes arguments on the stack, and then
               changes the stack pointer. As the stack increase tool
               function might mark it as undefined, we have to call it
               at the good moment. */
            /* NOTE(review): this memsets/reads `size` bytes into an
               Addr — assumes the SP register size equals sizeof(Addr)
               on all supported archs; TODO confirm. */
            VG_(memset) ((void *) &old_SP, 0, size);
            (*the_low_target.transfer_register) (tid, regno, (void *) &old_SP,
                                                 valgrind_to_gdbserver, size, &mod);
         }

         VG_(memset) (buf, 0, size);
         /* Pull the (possibly GDB-modified) value from the register
            cache and push it into the VEX guest state. */
         collect_register (regno, buf);
         (*the_low_target.transfer_register) (tid, regno, buf,
                                              gdbserver_to_valgrind, size, &mod);
         if (mod && VG_(debugLog_getLevel)() > 1) {
            char bufimage [2*size + 1];
            heximage (bufimage, buf, size);
            dlog(2,
                 "stored register %d size %d name %s value %s "
                 "tid %d status %s\n",
                 regno, size, the_low_target.reg_defs[regno].name, bufimage,
                 tid, VG_(name_of_ThreadStatus) (tst->status));
         }
         if (regno == the_low_target.stack_pointer_regno) {
            VG_(memcpy) (&new_SP, buf, size);
            /* Stack grows down: old_SP > new_SP means the stack was
               extended; delta is negative, so -delta is the size of
               the newly exposed stack area. */
            if (old_SP > new_SP) {
               Word delta = (Word)new_SP - (Word)old_SP;
               dlog(1,
                    " stack increase by stack pointer changed from %p to %p "
                    "delta %ld\n",
                    (void*) old_SP, (void *) new_SP,
                    delta);
               VG_TRACK( new_mem_stack_w_ECU, new_SP, -delta, 0 );
               VG_TRACK( new_mem_stack, new_SP, -delta );
               VG_TRACK( post_mem_write, Vg_CoreClientReq, tid,
                         new_SP, -delta);
            }
         }
      }
   }
   else {
      /* REGNO == -1 (or any negative): store every register. */
      for (regno = 0; regno < the_low_target.num_regs; regno++)
         usr_store_inferior_registers (regno);
   }
}
448
/* Fetch registers (all of them if REGNO is -1 or 0) from the guest
   state into the gdbserver register cache. */
void valgrind_fetch_registers (int regno)
{
   usr_fetch_inferior_registers (regno);
}
453
/* Store registers (all of them if REGNO is negative) from the
   gdbserver register cache back into the guest state. */
void valgrind_store_registers (int regno)
{
   usr_store_inferior_registers (regno);
}
458
459 Bool hostvisibility = False;
460
valgrind_read_memory(CORE_ADDR memaddr,unsigned char * myaddr,int len)461 int valgrind_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
462 {
463 const void *sourceaddr = C2v (memaddr);
464 dlog(3, "reading memory %p size %d\n", sourceaddr, len);
465 if (VG_(am_is_valid_for_client) ((Addr) sourceaddr,
466 len, VKI_PROT_READ)
467 || (hostvisibility
468 && VG_(am_is_valid_for_valgrind) ((Addr) sourceaddr,
469 len, VKI_PROT_READ))) {
470 VG_(memcpy) (myaddr, sourceaddr, len);
471 return 0;
472 } else {
473 dlog(1, "error reading memory %p size %d\n", sourceaddr, len);
474 return -1;
475 }
476 }
477
/* Copy LEN bytes from MYADDR into client memory at MEMADDR (valgrind
   host memory is also allowed when hostvisibility is set).
   Returns 0 on success, -1 if the range is not writable. */
int valgrind_write_memory (CORE_ADDR memaddr,
                           const unsigned char *myaddr, int len)
{
   Bool is_valid_client_memory;
   void *targetaddr = C2v (memaddr);
   dlog(3, "writing memory %p size %d\n", targetaddr, len);
   is_valid_client_memory
      = VG_(am_is_valid_for_client) ((Addr)targetaddr, len, VKI_PROT_WRITE);
   if (is_valid_client_memory
       || (hostvisibility
           && VG_(am_is_valid_for_valgrind) ((Addr) targetaddr,
                                             len, VKI_PROT_READ))) {
      if (len > 0) {
         VG_(memcpy) (targetaddr, myaddr, len);
         /* Only tell the tool about writes to client memory, not
            valgrind host memory. */
         if (is_valid_client_memory && VG_(tdict).track_post_mem_write) {
            /* Inform the tool of the post memwrite. Note that we do the
               minimum necessary to avoid complains from e.g.
               memcheck. The idea is that the debugger is as least
               intrusive as possible. So, we do not inform of the pre
               mem write (and in any case, this would cause problems with
               memcheck that does not like our CorePart in
               pre_mem_write. */
            ThreadState *tst =
               (ThreadState *) inferior_target_data (current_inferior);
            ThreadId tid = tst->tid;
            VG_(tdict).track_post_mem_write( Vg_CoreClientReq, tid,
                                             (Addr) targetaddr, len );
         }
      }
      return 0;
   } else {
      dlog(1, "error writing memory %p size %d\n", targetaddr, len);
      return -1;
   }
}
513
514 /* insert or remove a breakpoint */
515 static
valgrind_point(Bool insert,char type,CORE_ADDR addr,int len)516 int valgrind_point (Bool insert, char type, CORE_ADDR addr, int len)
517 {
518 PointKind kind;
519 switch (type) {
520 case '0': /* implemented by inserting checks at each instruction in sb */
521 kind = software_breakpoint;
522 break;
523 case '1': /* hw breakpoint, same implementation as sw breakpoint */
524 kind = hardware_breakpoint;
525 break;
526 case '2':
527 kind = write_watchpoint;
528 break;
529 case '3':
530 kind = read_watchpoint;
531 break;
532 case '4':
533 kind = access_watchpoint;
534 break;
535 default:
536 vg_assert (0);
537 }
538
539 /* Attention: gdbserver convention differs: 0 means ok; 1 means not ok */
540 if (VG_(gdbserver_point) (kind, insert, addr, len))
541 return 0;
542 else
543 return 1; /* error or unsupported */
544 }
545
/* Returns the architecture specific target xml description of the
   register set, including shadow registers when SHADOW_MODE. */
const char* valgrind_target_xml (Bool shadow_mode)
{
   return (*the_low_target.target_xml) (shadow_mode);
}
550
/* Insert a break/watchpoint of the given gdb TYPE ('0'..'4', see
   valgrind_point) at ADDR/LEN.  Returns 0 on success, 1 otherwise. */
int valgrind_insert_watchpoint (char type, CORE_ADDR addr, int len)
{
   return valgrind_point (/* insert */ True, type, addr, len);
}
555
/* Remove a break/watchpoint of the given gdb TYPE ('0'..'4', see
   valgrind_point) at ADDR/LEN.  Returns 0 on success, 1 otherwise. */
int valgrind_remove_watchpoint (char type, CORE_ADDR addr, int len)
{
   return valgrind_point (/* insert*/ False, type, addr, len);
}
560
/* Returns the (platform specific) offset of lm_modid field in the link map
   struct.
   Stores the offset in *result and returns True if offset can be determined.
   Returns False otherwise. *result is not to be used then.
   The offset is computed once by running the external helper program
   <libdir>/getoff-<platform>; subsequent calls return the cached result. */
static Bool getplatformoffset (SizeT *result)
{
   static Bool getplatformoffset_called = False;

   static Bool lm_modid_offset_found = False;
   static SizeT lm_modid_offset = 1u << 31; // Rubbish initial value.
   // lm_modid_offset is a magic offset, retrieved using an external program.

   if (!getplatformoffset_called) {
      getplatformoffset_called = True;
      const HChar *platform = VG_PLATFORM;
      const HChar *cmdformat = "%s/%s-%s -o %s";
      const HChar *getoff = "getoff";
      HChar outfile[VG_(mkstemp_fullname_bufsz) (VG_(strlen)(getoff))];
      Int fd = VG_(mkstemp) (getoff, outfile);
      if (fd == -1)
         return False;
      /* Each %s in cmdformat is replaced (hence the four "- 2"
         corrections), plus 1 for the trailing NUL. */
      HChar cmd[ VG_(strlen)(cmdformat)
                 + VG_(strlen)(VG_(libdir)) - 2
                 + VG_(strlen)(getoff) - 2
                 + VG_(strlen)(platform) - 2
                 + VG_(strlen)(outfile) - 2
                 + 1];
      UInt cmdlen;
      struct vg_stat stat_buf;
      Int ret;

      cmdlen = VG_(snprintf)(cmd, sizeof(cmd),
                             cmdformat,
                             VG_(libdir), getoff, platform, outfile);
      vg_assert (cmdlen == sizeof(cmd) - 1);
      /* Run the helper; its output (e.g. "lm_modid_offset 0x...") goes
         to outfile. */
      ret = VG_(system) (cmd);
      if (ret != 0 || VG_(debugLog_getLevel)() >= 1)
         VG_(dmsg) ("command %s exit code %d\n", cmd, ret);
      ret = VG_(fstat)( fd, &stat_buf );
      if (ret != 0)
         VG_(dmsg) ("error VG_(fstat) %d %s\n", fd, outfile);
      else {
         HChar *w;
         HChar *ssaveptr;
         HChar *os;
         HChar *str;
         HChar *endptr;

         /* NOTE(review): os is allocated with malloc but released with
            VG_(free) — confirm both map to the same allocator in this
            build. */
         os = malloc (stat_buf.size+1);
         vg_assert (os);
         ret = VG_(read)(fd, os, stat_buf.size);
         vg_assert(ret == stat_buf.size);
         os[ret] = '\0';
         str = os;
         /* Parse whitespace-separated "key value" tokens. */
         while ((w = VG_(strtok_r)(str, " \n", &ssaveptr)) != NULL) {
            if (VG_(strcmp) (w, "lm_modid_offset") == 0) {
               /* NOTE(review): w may be NULL here if the keyword is the
                  last token — presumably the helper always emits a
                  value; confirm. */
               w = VG_(strtok_r)(NULL, " \n", &ssaveptr);
               lm_modid_offset = (SizeT) VG_(strtoull16) ( w, &endptr );
               if (endptr == w)
                  VG_(dmsg) ("%s lm_modid_offset unexpected hex value %s\n",
                             cmd, w);
               else
                  lm_modid_offset_found = True;
            } else {
               VG_(dmsg) ("%s produced unexpected %s\n", cmd, w);
            }
            str = NULL; // ensure next VG_(strtok_r) continues the parsing.
         }
         VG_(free) (os);
      }

      VG_(close)(fd);
      ret = VG_(unlink)( outfile );
      if (ret != 0)
         VG_(umsg) ("error: could not unlink %s\n", outfile);
   }

   *result = lm_modid_offset;
   return lm_modid_offset_found;
}
641
/* Compute in *TLS_ADDR the address of the thread local storage variable
   at OFFSET in the module whose link_map is at LM, for thread TST,
   by walking the thread's dtv (dynamic thread vector).
   Returns True on success; returns False (with *tls_addr == 0) when
   any needed structure is not available or not addressable. */
Bool valgrind_get_tls_addr (ThreadState *tst,
                            CORE_ADDR offset,
                            CORE_ADDR lm,
                            CORE_ADDR *tls_addr)
{
   CORE_ADDR **dtv_loc;
   CORE_ADDR *dtv;
   SizeT lm_modid_offset;
   unsigned long int modid;

/* Bail out (returning False) unless [addr, addr+len) is client-readable. */
#define CHECK_DEREF(addr, len, name) \
   if (!VG_(am_is_valid_for_client) ((Addr)(addr), (len), VKI_PROT_READ)) { \
      dlog(0, "get_tls_addr: %s at %p len %lu not addressable\n",       \
           name, (void*)(addr), (unsigned long)(len));                  \
      return False;                                                     \
   }

   *tls_addr = 0;

   if (the_low_target.target_get_dtv == NULL) {
      dlog(1, "low level dtv support not available\n");
      return False;
   }

   if (!getplatformoffset (&lm_modid_offset)) {
      dlog(0, "link_map modid field offset not available\n");
      return False;
   }
   dlog (2, "link_map modid offset %p\n", (void*)lm_modid_offset);
   vg_assert (lm_modid_offset < 0x10000); // let's say

   dtv_loc = (*the_low_target.target_get_dtv)(tst);
   if (dtv_loc == NULL) {
      dlog(0, "low level dtv support returned NULL\n");
      return False;
   }

   CHECK_DEREF(dtv_loc, sizeof(CORE_ADDR), "dtv_loc");
   dtv = *dtv_loc;

   // Check we can read at least 2 address at the beginning of dtv.
   CHECK_DEREF(dtv, 2*sizeof(CORE_ADDR), "dtv 2 first entries");
   dlog (2, "tid %d dtv %p\n", tst->tid, (void*)dtv);

   // Check we can read the modid
   CHECK_DEREF(lm+lm_modid_offset, sizeof(unsigned long int), "link_map modid");
   modid = *(unsigned long int *)(lm+lm_modid_offset);

   // Check we can access the dtv entry for modid
   CHECK_DEREF(dtv + 2 * modid, sizeof(CORE_ADDR), "dtv[2*modid]");

   // And finally compute the address of the tls variable.
   *tls_addr = *(dtv + 2 * modid) + offset;

   return True;

#undef CHECK_DEREF
}
700
701 /* returns a pointer to the architecture state corresponding to
702 the provided register set: 0 => normal guest registers,
703 1 => shadow1
704 2 => shadow2
705 */
get_arch(int set,ThreadState * tst)706 VexGuestArchState* get_arch (int set, ThreadState* tst)
707 {
708 switch (set) {
709 case 0: return &tst->arch.vex;
710 case 1: return &tst->arch.vex_shadow1;
711 case 2: return &tst->arch.vex_shadow2;
712 default: vg_assert(0);
713 }
714 }
715
/* Register definitions of the plain (non-shadow) architecture, saved
   the first time initialize_shadow_low is called so that shadow mode
   can be switched on and off repeatedly. */
static int non_shadow_num_regs = 0;
static struct reg *non_shadow_reg_defs = NULL;
/* Switch the register cache layout between plain mode and shadow mode
   (plain registers plus two shadow register sets). */
void initialize_shadow_low(Bool shadow_mode)
{
   /* First call: remember the architecture's plain register set. */
   if (non_shadow_reg_defs == NULL) {
      non_shadow_reg_defs = the_low_target.reg_defs;
      non_shadow_num_regs = the_low_target.num_regs;
   }

   regcache_invalidate();
   /* Free a previously built shadow description (never the saved
      plain one). */
   if (the_low_target.reg_defs != non_shadow_reg_defs) {
      free (the_low_target.reg_defs);
   }
   if (shadow_mode) {
      the_low_target.num_regs = 3 * non_shadow_num_regs;
      the_low_target.reg_defs = build_shadow_arch (non_shadow_reg_defs, non_shadow_num_regs);
   } else {
      the_low_target.num_regs = non_shadow_num_regs;
      the_low_target.reg_defs = non_shadow_reg_defs;
   }
   set_register_cache (the_low_target.reg_defs, the_low_target.num_regs);
}
738
/* Set current_inferior to the thread GDB's packets refer to:
   the general_thread when USE_GENERAL is 1, otherwise the
   step/continue thread (falling back to the head of all_threads when
   no matching thread is found). */
void set_desired_inferior (int use_general)
{
   struct thread_info *found;

   if (use_general == 1) {
      found = (struct thread_info *) find_inferior_id (&all_threads,
                                                       general_thread);
   } else {
      found = NULL;

      /* If we are continuing any (all) thread(s), use step_thread
         to decide which thread to step and/or send the specified
         signal to. */
      if ((step_thread != 0 && step_thread != -1)
          && (cont_thread == 0 || cont_thread == -1))
         found = (struct thread_info *) find_inferior_id (&all_threads,
                                                          step_thread);

      if (found == NULL)
         found = (struct thread_info *) find_inferior_id (&all_threads,
                                                          cont_thread);
   }

   if (found == NULL)
      current_inferior = (struct thread_info *) all_threads.head;
   else
      current_inferior = found;
   {
      ThreadState *tst = (ThreadState *) inferior_target_data (current_inferior);
      ThreadId tid = tst->tid;
      dlog(1, "set_desired_inferior use_general %d found %p tid %d lwpid %d\n",
           use_general, found, tid, tst->os_state.lwpid);
   }
}
773
VG_(dmemcpy)774 void* VG_(dmemcpy) ( void *d, const void *s, SizeT sz, Bool *mod )
775 {
776 if (VG_(memcmp) (d, s, sz)) {
777 *mod = True;
778 return VG_(memcpy) (d, s, sz);
779 } else {
780 *mod = False;
781 return d;
782 }
783 }
784
VG_(transfer)785 void VG_(transfer) (void *valgrind,
786 void *gdbserver,
787 transfer_direction dir,
788 SizeT sz,
789 Bool *mod)
790 {
791 if (dir == valgrind_to_gdbserver)
792 VG_(dmemcpy) (gdbserver, valgrind, sz, mod);
793 else if (dir == gdbserver_to_valgrind)
794 VG_(dmemcpy) (valgrind, gdbserver, sz, mod);
795 else
796 vg_assert (0);
797 }
798
/* Initialize the_low_target with the architecture specific operations
   for the platform valgrind was built for (exactly one VGA_* is
   defined at build time). */
void valgrind_initialize_target(void)
{
#if defined(VGA_x86)
   x86_init_architecture(&the_low_target);
#elif defined(VGA_amd64)
   amd64_init_architecture(&the_low_target);
#elif defined(VGA_arm)
   arm_init_architecture(&the_low_target);
#elif defined(VGA_arm64)
   arm64_init_architecture(&the_low_target);
#elif defined(VGA_ppc32)
   ppc32_init_architecture(&the_low_target);
#elif defined(VGA_ppc64be) || defined(VGA_ppc64le)
   ppc64_init_architecture(&the_low_target);
#elif defined(VGA_s390x)
   s390x_init_architecture(&the_low_target);
#elif defined(VGA_mips32)
   mips32_init_architecture(&the_low_target);
#elif defined(VGA_mips64)
   mips64_init_architecture(&the_low_target);
#elif defined(VGA_tilegx)
   tilegx_init_architecture(&the_low_target);
#else
   #error "architecture missing in target.c valgrind_initialize_target"
#endif
}
825