
/*--------------------------------------------------------------------*/
/*--- The address space manager: stuff common to all platforms     ---*/
/*---                                                              ---*/
/*---                                         m_aspacemgr-common.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2006-2013 OpenWorks LLP
      info@open-works.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* *************************************************************
   DO NOT INCLUDE ANY OTHER FILES HERE.
   ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
   AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
   ************************************************************* */

#include "priv_aspacemgr.h"
#include "config.h"


/*-----------------------------------------------------------------*/
/*---                                                           ---*/
/*--- Stuff to make aspacem almost completely independent of    ---*/
/*--- the rest of Valgrind.                                     ---*/
/*---                                                           ---*/
/*-----------------------------------------------------------------*/

//--------------------------------------------------------------
// Simple assert and assert-like fns, which avoid dependence on
// m_libcassert, and hence on the entire debug-info reader swamp

__attribute__ ((noreturn))
void ML_(am_exit)( Int status )
{
   VG_(exit_now) (status);
}

void ML_(am_barf) ( const HChar* what )
{
   VG_(debugLog)(0, "aspacem", "Valgrind: FATAL: %s\n", what);
   VG_(debugLog)(0, "aspacem", "Exiting now.\n");
   ML_(am_exit)(1);
}

void ML_(am_barf_toolow) ( const HChar* what )
{
   VG_(debugLog)(0, "aspacem",
                    "Valgrind: FATAL: %s is too low.\n", what);
   VG_(debugLog)(0, "aspacem", "  Increase it and rebuild.  "
                               "Exiting now.\n");
   ML_(am_exit)(1);
}

void ML_(am_assert_fail)( const HChar* expr,
                          const HChar* file,
                          Int line,
                          const HChar* fn )
{
   VG_(debugLog)(0, "aspacem",
                    "Valgrind: FATAL: aspacem assertion failed:\n");
   VG_(debugLog)(0, "aspacem", "  %s\n", expr);
   VG_(debugLog)(0, "aspacem", "  at %s:%d (%s)\n", file,line,fn);
   VG_(debugLog)(0, "aspacem", "Exiting now.\n");
   ML_(am_exit)(1);
}

Int ML_(am_getpid)( void )
{
   SysRes sres = VG_(do_syscall0)(__NR_getpid);
   aspacem_assert(!sr_isError(sres));
   return sr_Res(sres);
}


//--------------------------------------------------------------
// A simple sprintf implementation, so as to avoid dependence on
// m_libcprint.

/* Append one character to the buffer tracked by *p and advance the
   write pointer.  Used as the emit callback for VG_(debugLog_vprintf). */
static void local_add_to_aspacem_sprintf_buf ( HChar c, void *p )
{
   HChar** aspacem_sprintf_ptr = p;
   *(*aspacem_sprintf_ptr)++ = c;
}

static
UInt local_vsprintf ( HChar* buf, const HChar *format, va_list vargs )
{
   Int ret;
   HChar *aspacem_sprintf_ptr = buf;

   ret = VG_(debugLog_vprintf)
            ( local_add_to_aspacem_sprintf_buf,
              &aspacem_sprintf_ptr, format, vargs );
   local_add_to_aspacem_sprintf_buf('\0', &aspacem_sprintf_ptr);

   return ret;
}

UInt ML_(am_sprintf) ( HChar* buf, const HChar *format, ... )
{
   UInt ret;
   va_list vargs;

   va_start(vargs,format);
   ret = local_vsprintf(buf, format, vargs);
   va_end(vargs);

   return ret;
}
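
/* Usage note (a sketch, mirroring the call made later in this file by
   ML_(am_resolve_filename)): the caller supplies the buffer and must
   make it large enough, since no bounds checking is done, e.g.

      HChar tmp[64];
      ML_(am_sprintf)(tmp, "/proc/self/fd/%d", fd);

   The value returned comes from VG_(debugLog_vprintf) and does not
   count the trailing '\0', which local_vsprintf appends itself. */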


//--------------------------------------------------------------
// Direct access to a handful of syscalls.  This avoids dependence on
// m_libc*.  THESE DO NOT UPDATE THE aspacem-internal DATA
// STRUCTURES (SEGMENT ARRAY).  DO NOT USE THEM UNLESS YOU KNOW WHAT
// YOU ARE DOING.

/* --- Pertaining to mappings --- */

/* Note: this is VG_, not ML_. */
SysRes VG_(am_do_mmap_NO_NOTIFY)( Addr start, SizeT length, UInt prot,
                                  UInt flags, Int fd, Off64T offset)
{
   SysRes res;
   aspacem_assert(VG_IS_PAGE_ALIGNED(offset));

#  if defined(VGP_arm64_linux)
   res = VG_(do_syscall6)(__NR3264_mmap, (UWord)start, length,
                          prot, flags, fd, offset);
#  elif defined(VGP_x86_linux) || defined(VGP_ppc32_linux) \
        || defined(VGP_arm_linux)
   /* mmap2 takes the file offset in units of 4096 bytes, even if the
      actual page size is bigger. */
   aspacem_assert((offset % 4096) == 0);
   res = VG_(do_syscall6)(__NR_mmap2, (UWord)start, length,
                          prot, flags, fd, offset / 4096);
#  elif defined(VGP_amd64_linux) \
        || defined(VGP_ppc64be_linux)  || defined(VGP_ppc64le_linux) \
        || defined(VGP_s390x_linux) || defined(VGP_mips32_linux) \
        || defined(VGP_mips64_linux) || defined(VGP_arm64_linux) \
        || defined(VGP_tilegx_linux)
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
                          prot, flags, fd, offset);
#  elif defined(VGP_x86_darwin)
   if (fd == 0  &&  (flags & VKI_MAP_ANONYMOUS)) {
       fd = -1;  // MAP_ANON with fd==0 is EINVAL
   }
   res = VG_(do_syscall7)(__NR_mmap, (UWord)start, length,
                          prot, flags, fd, offset & 0xffffffff, offset >> 32);
#  elif defined(VGP_amd64_darwin)
   if (fd == 0  &&  (flags & VKI_MAP_ANONYMOUS)) {
       fd = -1;  // MAP_ANON with fd==0 is EINVAL
   }
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
                          prot, flags, (UInt)fd, offset);
#  else
#    error Unknown platform
#  endif
   return res;
}
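
/* Worked example for the mmap2 branch above: on the 32-bit Linux
   targets listed there the kernel expects the file offset in units of
   4096 bytes, so a byte offset of 12288 is passed as 12288/4096 = 3.
   That is why the offset must be a multiple of 4096 even on targets
   whose real page size is larger. */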

static
SysRes local_do_mprotect_NO_NOTIFY(Addr start, SizeT length, UInt prot)
{
   return VG_(do_syscall3)(__NR_mprotect, (UWord)start, length, prot );
}

SysRes ML_(am_do_munmap_NO_NOTIFY)(Addr start, SizeT length)
{
   return VG_(do_syscall2)(__NR_munmap, (UWord)start, length );
}

#if HAVE_MREMAP
/* The following are used only to implement mremap(). */

SysRes ML_(am_do_extend_mapping_NO_NOTIFY)(
          Addr  old_addr,
          SizeT old_len,
          SizeT new_len
       )
{
   /* Extend the mapping old_addr .. old_addr+old_len-1 to have length
      new_len, WITHOUT moving it.  If it can't be extended in place,
      fail. */
#  if defined(VGO_linux)
   return VG_(do_syscall5)(
             __NR_mremap,
             old_addr, old_len, new_len,
             0/*flags, meaning: must be at old_addr, else FAIL */,
             0/*new_addr, is ignored*/
          );
#  else
#    error Unknown OS
#  endif
}

SysRes ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)(
          Addr old_addr, Addr old_len,
          Addr new_addr, Addr new_len
       )
{
   /* Move the mapping old_addr .. old_addr+old_len-1 to the new
      location and with the new length.  Only needs to handle the case
      where the two areas do not overlap, neither length is zero, and
      all args are page aligned. */
#  if defined(VGO_linux)
   return VG_(do_syscall5)(
             __NR_mremap,
             old_addr, old_len, new_len,
             VKI_MREMAP_MAYMOVE|VKI_MREMAP_FIXED/*move-or-fail*/,
             new_addr
          );
#  else
#    error Unknown OS
#  endif
}
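
/* Summary of the two mremap wrappers above: flags == 0 asks the kernel
   to grow the mapping strictly in place (fail rather than move it),
   whereas VKI_MREMAP_MAYMOVE|VKI_MREMAP_FIXED forces the mapping to
   land exactly at new_addr or fail.  As the warning at the top of this
   syscall section says, neither call updates the segment array; that
   is the caller's job. */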

#endif

/* --- Pertaining to files --- */

SysRes ML_(am_open) ( const HChar* pathname, Int flags, Int mode )
{
#  if defined(VGP_arm64_linux)
   /* ARM64 wants to use __NR_openat rather than __NR_open. */
   SysRes res = VG_(do_syscall4)(__NR_openat,
                                 VKI_AT_FDCWD, (UWord)pathname, flags, mode);
#  elif defined(VGP_tilegx_linux)
   SysRes res = VG_(do_syscall4)(__NR_openat, VKI_AT_FDCWD, (UWord)pathname,
                                 flags, mode);
#  else
   SysRes res = VG_(do_syscall3)(__NR_open, (UWord)pathname, flags, mode);
#  endif
   return res;
}

Int ML_(am_read) ( Int fd, void* buf, Int count)
{
   SysRes res = VG_(do_syscall3)(__NR_read, fd, (UWord)buf, count);
   return sr_isError(res) ? -1 : sr_Res(res);
}

void ML_(am_close) ( Int fd )
{
   (void)VG_(do_syscall1)(__NR_close, fd);
}
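
/* Illustrative only (a hypothetical caller; the path, buffer and
   length names are just examples): the three wrappers above combine
   into a minimal open/read/close sequence with no m_libc* involvement:

      SysRes sres = ML_(am_open)( "/proc/self/maps", VKI_O_RDONLY, 0 );
      if (!sr_isError(sres)) {
         Int fd = sr_Res(sres);
         Int n  = ML_(am_read)( fd, buf, buf_len );   // -1 on error
         ML_(am_close)( fd );
      }
*/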

Int ML_(am_readlink)(const HChar* path, HChar* buf, UInt bufsiz)
{
   SysRes res;
#  if defined(VGP_arm64_linux)
   res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD,
                                           (UWord)path, (UWord)buf, bufsiz);
#  elif defined(VGP_tilegx_linux)
   res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD, (UWord)path,
                          (UWord)buf, bufsiz);
#  else
   res = VG_(do_syscall3)(__NR_readlink, (UWord)path, (UWord)buf, bufsiz);
#  endif
   return sr_isError(res) ? -1 : sr_Res(res);
}

Int ML_(am_fcntl) ( Int fd, Int cmd, Addr arg )
{
#  if defined(VGO_linux)
   SysRes res = VG_(do_syscall3)(__NR_fcntl, fd, cmd, arg);
#  elif defined(VGO_darwin)
   SysRes res = VG_(do_syscall3)(__NR_fcntl_nocancel, fd, cmd, arg);
#  else
#  error "Unknown OS"
#  endif
   return sr_isError(res) ? -1 : sr_Res(res);
}

/* Get the dev, inode and mode info for a file descriptor, if
   possible.  Returns True on success. */
Bool ML_(am_get_fd_d_i_m)( Int fd,
                           /*OUT*/ULong* dev,
                           /*OUT*/ULong* ino, /*OUT*/UInt* mode )
{
   SysRes          res;
   struct vki_stat buf;
#  if defined(VGO_linux) && defined(__NR_fstat64)
   /* Try fstat64 first as it can cope with minor and major device
      numbers outside the 0-255 range and it works properly for x86
      binaries on amd64 systems where fstat seems to be broken. */
   struct vki_stat64 buf64;
   res = VG_(do_syscall2)(__NR_fstat64, fd, (UWord)&buf64);
   if (!sr_isError(res)) {
      *dev  = (ULong)buf64.st_dev;
      *ino  = (ULong)buf64.st_ino;
      *mode = (UInt) buf64.st_mode;
      return True;
   }
#  endif
   res = VG_(do_syscall2)(__NR_fstat, fd, (UWord)&buf);
   if (!sr_isError(res)) {
      *dev  = (ULong)buf.st_dev;
      *ino  = (ULong)buf.st_ino;
      *mode = (UInt) buf.st_mode;
      return True;
   }
   return False;
}
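
/* Illustrative only (hypothetical caller): the (dev, ino) pair fetched
   above is typically used to decide whether a file descriptor refers
   to the same underlying file as some already-known mapping, e.g.

      ULong dev, ino;
      UInt  mode;
      if (ML_(am_get_fd_d_i_m)( fd, &dev, &ino, &mode )) {
         // compare dev/ino against values recorded elsewhere
      }
*/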

Bool ML_(am_resolve_filename) ( Int fd, /*OUT*/HChar* buf, Int nbuf )
{
#if defined(VGO_linux)
   Int i;
   HChar tmp[64];    // large enough
   for (i = 0; i < nbuf; i++) buf[i] = 0;
   ML_(am_sprintf)(tmp, "/proc/self/fd/%d", fd);
   if (ML_(am_readlink)(tmp, buf, nbuf) > 0 && buf[0] == '/')
      return True;
   else
      return False;

#elif defined(VGO_darwin)
   HChar tmp[VKI_MAXPATHLEN+1];
   if (0 == ML_(am_fcntl)(fd, VKI_F_GETPATH, (UWord)tmp)) {
      if (nbuf > 0) {
         VG_(strncpy)( buf, tmp, nbuf < sizeof(tmp) ? nbuf : sizeof(tmp) );
         buf[nbuf-1] = 0;
      }
      if (tmp[0] == '/') return True;
   }
   return False;

#  else
#     error Unknown OS
#  endif
}
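
/* Illustrative only (hypothetical caller; the buffer size is chosen
   arbitrarily for the sketch):

      HChar name[512];
      if (ML_(am_resolve_filename)( fd, name, sizeof(name) )) {
         // name now holds an absolute path for fd
      }

   On Linux this works by reading the /proc/self/fd/<fd> symlink, so it
   can only succeed if /proc is mounted and the link resolves to an
   absolute path. */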




/*-----------------------------------------------------------------*/
/*---                                                           ---*/
/*--- Manage stacks for Valgrind itself.                        ---*/
/*---                                                           ---*/
/*-----------------------------------------------------------------*/
struct _VgStack {
   HChar bytes[1];
   // We use a fake size of 1. A bigger size is allocated
   // by VG_(am_alloc_VgStack).
};
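
/* Layout of the block handed out by VG_(am_alloc_VgStack) below, as a
   sketch derived from that code:

      bytes[0]
        +-- lower guard:  VG_STACK_GUARD_SZB bytes, PROT_NONE
        +-- active area:  VG_(clo_valgrind_stacksize) bytes,
        |                 initially filled with 0xDEADBEEF
        +-- upper guard:  VG_STACK_GUARD_SZB bytes, PROT_NONE

   The *initial_sp value returned points just below the upper guard,
   dropped by 8 bytes and rounded down to a 32-byte boundary. */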

/* Allocate and initialise a VgStack (anonymous valgrind space).
   Protect the stack active area and the guard areas appropriately.
   Returns NULL on failure, else the address of the bottom of the
   stack.  On success, also sets *initial_sp to what the stack pointer
   should be set to. */

VgStack* VG_(am_alloc_VgStack)( /*OUT*/Addr* initial_sp )
{
   Int      szB;
   SysRes   sres;
   VgStack* stack;
   UInt*    p;
   Int      i;

   /* Allocate the stack. */
   szB = VG_STACK_GUARD_SZB
         + VG_(clo_valgrind_stacksize) + VG_STACK_GUARD_SZB;

   sres = VG_(am_mmap_anon_float_valgrind)( szB );
   if (sr_isError(sres))
      return NULL;

   stack = (VgStack*)(Addr)sr_Res(sres);

   aspacem_assert(VG_IS_PAGE_ALIGNED(szB));
   aspacem_assert(VG_IS_PAGE_ALIGNED(stack));

   /* Protect the guard areas. */
   sres = local_do_mprotect_NO_NOTIFY(
             (Addr) &stack[0],
             VG_STACK_GUARD_SZB, VKI_PROT_NONE
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)(
      (Addr) &stack->bytes[0],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE
   );

   sres = local_do_mprotect_NO_NOTIFY(
             (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_(clo_valgrind_stacksize)],
             VG_STACK_GUARD_SZB, VKI_PROT_NONE
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)(
      (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_(clo_valgrind_stacksize)],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE
   );

   /* Looks good.  Fill the active area with junk so we can later
      tell how much got used. */

   p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
   for (i = 0; i < VG_(clo_valgrind_stacksize)/sizeof(UInt); i++)
      p[i] = 0xDEADBEEF;

   *initial_sp = (Addr)&stack->bytes[VG_STACK_GUARD_SZB + VG_(clo_valgrind_stacksize)];
   *initial_sp -= 8;
   *initial_sp &= ~((Addr)0x1F); /* 32-align it */

   VG_(debugLog)( 1,"aspacem",
                  "allocated valgrind thread stack at 0x%llx size %d\n",
                  (ULong)(Addr)stack, szB);
   ML_(am_do_sanity_check)();
   return stack;

  protect_failed:
   /* The stack was allocated, but we can't protect it.  Unmap it and
      return NULL (failure). */
   (void)ML_(am_do_munmap_NO_NOTIFY)( (Addr)stack, szB );
   ML_(am_do_sanity_check)();
   return NULL;
}
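
/* Illustrative only (hypothetical caller; error handling elided):

      Addr     sp;
      VgStack* stk = VG_(am_alloc_VgStack)( &sp );
      if (stk != NULL) {
         // switch to the new stack with the stack pointer set to sp
      }
*/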


/* Figure out how many bytes of the stack's active area have not
   been used.  Used for estimating if we are close to overflowing it. */

SizeT VG_(am_get_VgStack_unused_szB)( const VgStack* stack, SizeT limit )
{
   SizeT i;
   const UInt* p;

   p = (const UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
   for (i = 0; i < VG_(clo_valgrind_stacksize)/sizeof(UInt); i++) {
      if (p[i] != 0xDEADBEEF)
         break;
      if (i * sizeof(UInt) >= limit)
         break;
   }

   return i * sizeof(UInt);
}
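
/* Illustrative only (hypothetical check; the 2048-byte threshold is
   made up): a caller worried about overflow can poll the watermark:

      if (VG_(am_get_VgStack_unused_szB)( stack, 2048 ) < 2048) {
         // fewer than 2048 bytes of the active area remain unused --
         // dangerously close to the lower guard page
      }
*/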


/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/