/*--------------------------------------------------------------------*/
/*--- The address space manager: stuff common to all platforms    ---*/
/*---                                                              ---*/
/*---                                         m_aspacemgr-common.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2006-2015 OpenWorks LLP
      info@open-works.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* *************************************************************
   DO NOT INCLUDE ANY OTHER FILES HERE.
   ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
   AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
   ************************************************************* */

#include "priv_aspacemgr.h"
#include "config.h"


/*-----------------------------------------------------------------*/
/*---                                                           ---*/
/*--- Stuff to make aspacem almost completely independent of    ---*/
/*--- the rest of Valgrind.                                     ---*/
/*---                                                           ---*/
/*-----------------------------------------------------------------*/

//--------------------------------------------------------------
// Simple assert and assert-like fns, which avoid dependence on
// m_libcassert, and hence on the entire debug-info reader swamp

__attribute__ ((noreturn))
void ML_(am_exit)( Int status )
{
   VG_(exit_now) (status);
}

void ML_(am_barf) ( const HChar* what )
{
   VG_(debugLog)(0, "aspacem", "Valgrind: FATAL: %s\n", what);
   VG_(debugLog)(0, "aspacem", "Exiting now.\n");
   ML_(am_exit)(1);
}

void ML_(am_barf_toolow) ( const HChar* what )
{
   VG_(debugLog)(0, "aspacem",
                 "Valgrind: FATAL: %s is too low.\n", what);
   VG_(debugLog)(0, "aspacem", "  Increase it and rebuild.  "
                               "Exiting now.\n");
   ML_(am_exit)(1);
}

void ML_(am_assert_fail)( const HChar* expr,
                          const HChar* file,
                          Int line,
                          const HChar* fn )
{
   VG_(debugLog)(0, "aspacem",
                 "Valgrind: FATAL: aspacem assertion failed:\n");
   VG_(debugLog)(0, "aspacem", "  %s\n", expr);
   VG_(debugLog)(0, "aspacem", "  at %s:%d (%s)\n", file, line, fn);
   VG_(debugLog)(0, "aspacem", "Exiting now.\n");
   ML_(am_exit)(1);
}

Int ML_(am_getpid)( void )
{
   SysRes sres = VG_(do_syscall0)(__NR_getpid);
   aspacem_assert(!sr_isError(sres));
   return sr_Res(sres);
}


//--------------------------------------------------------------
// A simple sprintf implementation, so as to avoid dependence on
// m_libcprint.

static void local_add_to_aspacem_sprintf_buf ( HChar c, void *p )
{
   HChar** aspacem_sprintf_ptr = p;
   *(*aspacem_sprintf_ptr)++ = c;
}

static
UInt local_vsprintf ( HChar* buf, const HChar *format, va_list vargs )
{
   Int ret;
   HChar *aspacem_sprintf_ptr = buf;

   ret = VG_(debugLog_vprintf)
            ( local_add_to_aspacem_sprintf_buf,
              &aspacem_sprintf_ptr, format, vargs );
   local_add_to_aspacem_sprintf_buf('\0', &aspacem_sprintf_ptr);

   return ret;
}

UInt ML_(am_sprintf) ( HChar* buf, const HChar *format, ... )
{
   UInt ret;
   va_list vargs;

   va_start(vargs, format);
   ret = local_vsprintf(buf, format, vargs);
   va_end(vargs);

   return ret;
}
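
/* Illustrative usage (a sketch only; not compiled into the build, and
   the example function name is hypothetical): ML_(am_sprintf) does no
   bounds checking, so the caller must supply a buffer known to be
   large enough for the formatted result plus the trailing NUL.  The
   buffer size and fd value below are arbitrary assumptions. */
#if 0
static void example_am_sprintf_usage ( void )
{
   Int   fd = 2;     /* hypothetical file descriptor */
   HChar tmp[64];    /* comfortably large enough for the result */
   UInt  len = ML_(am_sprintf)(tmp, "/proc/self/fd/%d", fd);
   (void)len;        /* number of characters written, excluding the NUL */
}
#endif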


//--------------------------------------------------------------
// Direct access to a handful of syscalls.  This avoids dependence on
// m_libc*.  THESE DO NOT UPDATE THE aspacem-internal DATA
// STRUCTURES (SEGMENT ARRAY).  DO NOT USE THEM UNLESS YOU KNOW WHAT
// YOU ARE DOING.

/* --- Pertaining to mappings --- */

/* Note: this is VG_, not ML_. */
SysRes VG_(am_do_mmap_NO_NOTIFY)( Addr start, SizeT length, UInt prot,
                                  UInt flags, Int fd, Off64T offset)
{
   SysRes res;
   aspacem_assert(VG_IS_PAGE_ALIGNED(offset));

#  if defined(VGP_arm64_linux)
   res = VG_(do_syscall6)(__NR3264_mmap, (UWord)start, length,
                          prot, flags, fd, offset);
#  elif defined(VGP_x86_linux) || defined(VGP_ppc32_linux) \
        || defined(VGP_arm_linux)
   /* mmap2 uses 4096 chunks even if actual page size is bigger. */
   aspacem_assert((offset % 4096) == 0);
   res = VG_(do_syscall6)(__NR_mmap2, (UWord)start, length,
                          prot, flags, fd, offset / 4096);
#  elif defined(VGP_amd64_linux) \
        || defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux) \
        || defined(VGP_s390x_linux) || defined(VGP_mips32_linux) \
        || defined(VGP_mips64_linux) || defined(VGP_arm64_linux) \
        || defined(VGP_tilegx_linux)
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
                          prot, flags, fd, offset);
#  elif defined(VGP_x86_darwin)
   if (fd == 0 && (flags & VKI_MAP_ANONYMOUS)) {
      fd = -1;  // MAP_ANON with fd==0 is EINVAL
   }
   res = VG_(do_syscall7)(__NR_mmap, (UWord)start, length,
                          prot, flags, fd, offset & 0xffffffff, offset >> 32);
#  elif defined(VGP_amd64_darwin)
   if (fd == 0 && (flags & VKI_MAP_ANONYMOUS)) {
      fd = -1;  // MAP_ANON with fd==0 is EINVAL
   }
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
                          prot, flags, (UInt)fd, offset);
#  elif defined(VGP_x86_solaris)
   /* MAP_ANON with fd==0 is EINVAL. */
   if (fd == 0 && (flags & VKI_MAP_ANONYMOUS))
      fd = -1;
   res = VG_(do_syscall7)(__NR_mmap64, (UWord)start, length, prot, flags,
                          (UInt)fd, offset & 0xffffffff, offset >> 32);
#  elif defined(VGP_amd64_solaris)
   /* MAP_ANON with fd==0 is EINVAL. */
   if (fd == 0 && (flags & VKI_MAP_ANONYMOUS))
      fd = -1;
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length, prot, flags,
                          (UInt)fd, offset);
#  else
#    error Unknown platform
#  endif
   return res;
}

static
SysRes local_do_mprotect_NO_NOTIFY(Addr start, SizeT length, UInt prot)
{
   return VG_(do_syscall3)(__NR_mprotect, (UWord)start, length, prot );
}

SysRes ML_(am_do_munmap_NO_NOTIFY)(Addr start, SizeT length)
{
   return VG_(do_syscall2)(__NR_munmap, (UWord)start, length );
}
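
/* Illustrative usage (a sketch only; not compiled into the build, and
   the example function name is hypothetical): an anonymous, private,
   read-write mapping of one page, later released with the raw munmap
   wrapper above.  Neither call updates the aspacem segment array; the
   real callers in this module do that bookkeeping separately.  The
   one-page length is an arbitrary assumption. */
#if 0
static void example_raw_mmap_usage ( void )
{
   SysRes sres = VG_(am_do_mmap_NO_NOTIFY)(
                    0, 4096,
                    VKI_PROT_READ | VKI_PROT_WRITE,
                    VKI_MAP_PRIVATE | VKI_MAP_ANONYMOUS,
                    -1, 0 );
   if (!sr_isError(sres)) {
      Addr a = sr_Res(sres);
      /* ... use the page at 'a' ... */
      (void)ML_(am_do_munmap_NO_NOTIFY)(a, 4096);
   }
}
#endif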

#if HAVE_MREMAP
/* The following are used only to implement mremap(). */

SysRes ML_(am_do_extend_mapping_NO_NOTIFY)(
          Addr old_addr,
          SizeT old_len,
          SizeT new_len
       )
{
   /* Extend the mapping old_addr .. old_addr+old_len-1 to have length
      new_len, WITHOUT moving it.  If it can't be extended in place,
      fail. */
#  if defined(VGO_linux)
   return VG_(do_syscall5)(
             __NR_mremap,
             old_addr, old_len, new_len,
             0/*flags, meaning: must be at old_addr, else FAIL */,
             0/*new_addr, is ignored*/
          );
#  else
#    error Unknown OS
#  endif
}

SysRes ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)(
          Addr old_addr, Addr old_len,
          Addr new_addr, Addr new_len
       )
{
   /* Move the mapping old_addr .. old_addr+old_len-1 to the new
      location and with the new length.  Only needs to handle the case
      where the two areas do not overlap, neither length is zero, and
      all args are page aligned. */
#  if defined(VGO_linux)
   return VG_(do_syscall5)(
             __NR_mremap,
             old_addr, old_len, new_len,
             VKI_MREMAP_MAYMOVE|VKI_MREMAP_FIXED/*move-or-fail*/,
             new_addr
          );
#  else
#    error Unknown OS
#  endif
}

#endif

/* --- Pertaining to files --- */

SysRes ML_(am_open) ( const HChar* pathname, Int flags, Int mode )
{
#  if defined(VGP_arm64_linux)
   /* ARM64 wants to use __NR_openat rather than __NR_open. */
   SysRes res = VG_(do_syscall4)(__NR_openat,
                                 VKI_AT_FDCWD, (UWord)pathname, flags, mode);
#  elif defined(VGP_tilegx_linux)
   SysRes res = VG_(do_syscall4)(__NR_openat, VKI_AT_FDCWD, (UWord)pathname,
                                 flags, mode);
#  elif defined(VGO_linux) || defined(VGO_darwin)
   SysRes res = VG_(do_syscall3)(__NR_open, (UWord)pathname, flags, mode);
#  elif defined(VGO_solaris)
   SysRes res = VG_(do_syscall4)(__NR_openat, VKI_AT_FDCWD, (UWord)pathname,
                                 flags, mode);
#  else
#    error Unknown OS
#  endif
   return res;
}

Int ML_(am_read) ( Int fd, void* buf, Int count)
{
   SysRes res = VG_(do_syscall3)(__NR_read, fd, (UWord)buf, count);
   return sr_isError(res) ? -1 : sr_Res(res);
}

void ML_(am_close) ( Int fd )
{
   (void)VG_(do_syscall1)(__NR_close, fd);
}
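
/* Illustrative usage (a sketch only; not compiled into the build, and
   the example function name is hypothetical): reading the first few
   bytes of a file with the raw wrappers above.  The pathname and
   buffer size are arbitrary assumptions; a negative return from
   ML_(am_read) means the read failed. */
#if 0
static void example_raw_file_io ( void )
{
   SysRes ores = ML_(am_open)("/proc/self/maps", VKI_O_RDONLY, 0);
   if (!sr_isError(ores)) {
      Int   fd = sr_Res(ores);
      HChar data[256];
      Int   n = ML_(am_read)(fd, data, sizeof(data));
      (void)n;        /* bytes actually read, or -1 on error */
      ML_(am_close)(fd);
   }
}
#endif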

Int ML_(am_readlink)(const HChar* path, HChar* buf, UInt bufsiz)
{
   SysRes res;
#  if defined(VGP_arm64_linux)
   res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD,
                          (UWord)path, (UWord)buf, bufsiz);
#  elif defined(VGP_tilegx_linux)
   res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD, (UWord)path,
                          (UWord)buf, bufsiz);
#  elif defined(VGO_linux) || defined(VGO_darwin)
   res = VG_(do_syscall3)(__NR_readlink, (UWord)path, (UWord)buf, bufsiz);
#  elif defined(VGO_solaris)
   res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD, (UWord)path,
                          (UWord)buf, bufsiz);
#  else
#    error Unknown OS
#  endif
   return sr_isError(res) ? -1 : sr_Res(res);
}

Int ML_(am_fcntl) ( Int fd, Int cmd, Addr arg )
{
#  if defined(VGO_linux) || defined(VGO_solaris)
   SysRes res = VG_(do_syscall3)(__NR_fcntl, fd, cmd, arg);
#  elif defined(VGO_darwin)
   SysRes res = VG_(do_syscall3)(__NR_fcntl_nocancel, fd, cmd, arg);
#  else
#    error "Unknown OS"
#  endif
   return sr_isError(res) ? -1 : sr_Res(res);
}

/* Get the dev, inode and mode info for a file descriptor, if
   possible.  Returns True on success. */
Bool ML_(am_get_fd_d_i_m)( Int fd,
                           /*OUT*/ULong* dev,
                           /*OUT*/ULong* ino, /*OUT*/UInt* mode )
{
#  if defined(VGO_linux) || defined(VGO_darwin)
   SysRes          res;
   struct vki_stat buf;
#  if defined(VGO_linux) && defined(__NR_fstat64)
   /* Try fstat64 first as it can cope with minor and major device
      numbers outside the 0-255 range and it works properly for x86
      binaries on amd64 systems where fstat seems to be broken. */
   struct vki_stat64 buf64;
   res = VG_(do_syscall2)(__NR_fstat64, fd, (UWord)&buf64);
   if (!sr_isError(res)) {
      *dev  = (ULong)buf64.st_dev;
      *ino  = (ULong)buf64.st_ino;
      *mode = (UInt) buf64.st_mode;
      return True;
   }
#  endif
   res = VG_(do_syscall2)(__NR_fstat, fd, (UWord)&buf);
   if (!sr_isError(res)) {
      *dev  = (ULong)buf.st_dev;
      *ino  = (ULong)buf.st_ino;
      *mode = (UInt) buf.st_mode;
      return True;
   }
   return False;
#  elif defined(VGO_solaris)
#  if defined(VGP_x86_solaris)
   struct vki_stat64 buf64;
   SysRes res = VG_(do_syscall4)(__NR_fstatat64, fd, 0, (UWord)&buf64, 0);
#  elif defined(VGP_amd64_solaris)
   struct vki_stat buf64;
   SysRes res = VG_(do_syscall4)(__NR_fstatat, fd, 0, (UWord)&buf64, 0);
#  else
#    error "Unknown platform"
#  endif
   if (!sr_isError(res)) {
      *dev  = (ULong)buf64.st_dev;
      *ino  = (ULong)buf64.st_ino;
      *mode = (UInt) buf64.st_mode;
      return True;
   }
   return False;
#  else
#    error Unknown OS
#  endif
}
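
/* Illustrative usage (a sketch only; not compiled into the build, and
   the example function name is hypothetical): the out-parameters are
   only meaningful when True is returned, so callers must check the
   result before using them.  The descriptor is assumed to be already
   open. */
#if 0
static void example_get_fd_d_i_m ( Int fd )
{
   ULong dev, ino;
   UInt  mode;
   if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
      /* dev/ino/mode now identify the file behind fd */
   }
}
#endif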

Bool ML_(am_resolve_filename) ( Int fd, /*OUT*/HChar* buf, Int nbuf )
{
#if defined(VGO_linux)
   Int i;
   HChar tmp[64];    // large enough
   for (i = 0; i < nbuf; i++) buf[i] = 0;
   ML_(am_sprintf)(tmp, "/proc/self/fd/%d", fd);
   if (ML_(am_readlink)(tmp, buf, nbuf) > 0 && buf[0] == '/')
      return True;
   else
      return False;

#elif defined(VGO_darwin)
   HChar tmp[VKI_MAXPATHLEN+1];
   if (0 == ML_(am_fcntl)(fd, VKI_F_GETPATH, (UWord)tmp)) {
      if (nbuf > 0) {
         VG_(strncpy)( buf, tmp, nbuf < sizeof(tmp) ? nbuf : sizeof(tmp) );
         buf[nbuf-1] = 0;
      }
      if (tmp[0] == '/') return True;
   }
   return False;

#elif defined(VGO_solaris)
   Int i;
   HChar tmp[64];
   for (i = 0; i < nbuf; i++) buf[i] = 0;
   ML_(am_sprintf)(tmp, "/proc/self/path/%d", fd);
   if (ML_(am_readlink)(tmp, buf, nbuf) > 0 && buf[0] == '/')
      return True;
   else
      return False;

#  else
#    error Unknown OS
#  endif
}



/*-----------------------------------------------------------------*/
/*---                                                           ---*/
/*--- Manage stacks for Valgrind itself.                        ---*/
/*---                                                           ---*/
/*-----------------------------------------------------------------*/
struct _VgStack {
   HChar bytes[1];
   // We use a fake size of 1. A bigger size is allocated
   // by VG_(am_alloc_VgStack).
};

/* Allocate and initialise a VgStack (anonymous valgrind space).
   Protect the stack active area and the guard areas appropriately.
   Returns NULL on failure, else the address of the bottom of the
   stack.  On success, also sets *initial_sp to what the stack pointer
   should be set to. */

VgStack* VG_(am_alloc_VgStack)( /*OUT*/Addr* initial_sp )
{
   Int      szB;
   SysRes   sres;
   VgStack* stack;
   UInt*    p;
   Int      i;

   /* Allocate the stack. */
   szB = VG_STACK_GUARD_SZB
         + VG_(clo_valgrind_stacksize) + VG_STACK_GUARD_SZB;

   sres = VG_(am_mmap_anon_float_valgrind)( szB );
   if (sr_isError(sres))
      return NULL;

   stack = (VgStack*)(Addr)sr_Res(sres);

   aspacem_assert(VG_IS_PAGE_ALIGNED(szB));
   aspacem_assert(VG_IS_PAGE_ALIGNED(stack));

   /* Protect the guard areas. */
   sres = local_do_mprotect_NO_NOTIFY(
             (Addr) &stack[0],
             VG_STACK_GUARD_SZB, VKI_PROT_NONE
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)(
      (Addr) &stack->bytes[0],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE
   );

   sres = local_do_mprotect_NO_NOTIFY(
             (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_(clo_valgrind_stacksize)],
             VG_STACK_GUARD_SZB, VKI_PROT_NONE
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)(
      (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_(clo_valgrind_stacksize)],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE
   );

   /* Looks good.  Fill the active area with junk so we can later
      tell how much got used. */

   p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
   for (i = 0; i < VG_(clo_valgrind_stacksize)/sizeof(UInt); i++)
      p[i] = 0xDEADBEEF;

   *initial_sp = (Addr)&stack->bytes[VG_STACK_GUARD_SZB + VG_(clo_valgrind_stacksize)];
   *initial_sp -= 8;
   *initial_sp &= ~((Addr)0x1F); /* 32-align it */

   VG_(debugLog)( 1, "aspacem",
                  "allocated valgrind thread stack at 0x%llx size %d\n",
                  (ULong)(Addr)stack, szB);
   ML_(am_do_sanity_check)();
   return stack;

  protect_failed:
   /* The stack was allocated, but we can't protect it.  Unmap it and
      return NULL (failure). */
   (void)ML_(am_do_munmap_NO_NOTIFY)( (Addr)stack, szB );
   ML_(am_do_sanity_check)();
   return NULL;
}
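
/* Illustrative usage (a sketch only; not compiled into the build, and
   the example function name is hypothetical): roughly how a caller
   obtains a stack for a Valgrind-internal thread and the initial
   stack pointer to start it with.  Error handling beyond the NULL
   check is omitted. */
#if 0
static void example_alloc_VgStack ( void )
{
   Addr     sp  = 0;
   VgStack* stk = VG_(am_alloc_VgStack)( &sp );
   if (stk != NULL) {
      /* hand 'sp' to the new thread as its initial stack pointer */
   }
}
#endif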


/* Figure out how many bytes of the stack's active area have not
   been used.  Used for estimating if we are close to overflowing it. */

SizeT VG_(am_get_VgStack_unused_szB)( const VgStack* stack, SizeT limit )
{
   SizeT i;
   const UInt* p;

   p = (const UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
   for (i = 0; i < VG_(clo_valgrind_stacksize)/sizeof(UInt); i++) {
      if (p[i] != 0xDEADBEEF)
         break;
      if (i * sizeof(UInt) >= limit)
         break;
   }

   return i * sizeof(UInt);
}
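
/* Illustrative usage (a sketch only; not compiled into the build, and
   the example function name and threshold are hypothetical): since
   the scan stops once 'limit' bytes have been inspected, a return
   value smaller than 'limit' means fewer than 'limit' bytes of the
   active area are still untouched, i.e. the stack is getting close to
   full. */
#if 0
static void example_check_VgStack_headroom ( const VgStack* stk )
{
   const SizeT limit = 64 * 1024;   /* hypothetical warning threshold */
   if (VG_(am_get_VgStack_unused_szB)(stk, limit) < limit) {
      /* fewer than 64KB of the stack's active area remain unused */
   }
}
#endif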


/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/