//===-- sanitizer_posix_libcdep.cc ----------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements libc-dependent POSIX-specific functions
// from sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"

#if SANITIZER_POSIX

#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_posix.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"

#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>

#if SANITIZER_FREEBSD
// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
// that, it was never implemented. So just define it to zero.
#undef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif

namespace __sanitizer {

u32 GetUid() {
  return getuid();
}

uptr GetThreadSelf() {
  return (uptr)pthread_self();
}

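// Release the physical pages backing [addr, addr + size). On Linux,
// MADV_DONTNEED keeps the mapping itself valid; the range refaults as
// zero-filled pages on the next access.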
void FlushUnneededShadowMemory(uptr addr, uptr size) {
  madvise((void*)addr, size, MADV_DONTNEED);
}

void NoHugePagesInRegion(uptr addr, uptr size) {
#ifdef MADV_NOHUGEPAGE  // May not be defined on old systems.
  madvise((void *)addr, size, MADV_NOHUGEPAGE);
#endif  // MADV_NOHUGEPAGE
}

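// Exclude the range from core dumps where MADV_DONTDUMP is available
// (Linux 3.4+); large shadow regions would otherwise dominate the dump.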
void DontDumpShadowMemory(uptr addr, uptr length) {
#ifdef MADV_DONTDUMP
  madvise((void *)addr, length, MADV_DONTDUMP);
#endif
}

static rlim_t getlim(int res) {
  rlimit rlim;
  CHECK_EQ(0, getrlimit(res, &rlim));
  return rlim.rlim_cur;
}

static void setlim(int res, rlim_t lim) {
  // The following magic is to prevent clang from replacing it with memset.
  volatile struct rlimit rlim;
  rlim.rlim_cur = lim;
  rlim.rlim_max = lim;
  if (setrlimit(res, const_cast<struct rlimit *>(&rlim))) {
    Report("ERROR: %s setrlimit() failed %d\n", SanitizerToolName, errno);
    Die();
  }
}

void DisableCoreDumperIfNecessary() {
  if (common_flags()->disable_coredump) {
    setlim(RLIMIT_CORE, 0);
  }
}

bool StackSizeIsUnlimited() {
  rlim_t stack_size = getlim(RLIMIT_STACK);
  return (stack_size == RLIM_INFINITY);
}

void SetStackSizeLimitInBytes(uptr limit) {
  setlim(RLIMIT_STACK, (rlim_t)limit);
  CHECK(!StackSizeIsUnlimited());
}

bool AddressSpaceIsUnlimited() {
  rlim_t as_size = getlim(RLIMIT_AS);
  return (as_size == RLIM_INFINITY);
}

void SetAddressSpaceUnlimited() {
  setlim(RLIMIT_AS, RLIM_INFINITY);
  CHECK(AddressSpaceIsUnlimited());
}

void SleepForSeconds(int seconds) {
  sleep(seconds);
}

void SleepForMillis(int millis) {
  usleep(millis * 1000);
}

void Abort() {
  abort();
}

int Atexit(void (*function)(void)) {
#ifndef SANITIZER_GO
  return atexit(function);
#else
  return 0;
#endif
}

bool SupportsColoredOutput(fd_t fd) {
  return isatty(fd) != 0;
}

#ifndef SANITIZER_GO
// TODO(glider): different tools may require different altstack size.
static const uptr kAltStackSize = SIGSTKSZ * 4;  // SIGSTKSZ is not enough.

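// Switch the current thread to a dedicated, tool-owned signal stack so the
// deadly-signal handler can still run when the fault was caused by
// overflowing the thread's regular stack.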
void SetAlternateSignalStack() {
  stack_t altstack, oldstack;
  CHECK_EQ(0, sigaltstack(nullptr, &oldstack));
  // If the alternate stack is already in place, do nothing.
  // Android always sets an alternate stack, but it's too small for us.
  if (!SANITIZER_ANDROID && !(oldstack.ss_flags & SS_DISABLE)) return;
  // TODO(glider): the mapped stack should have the MAP_STACK flag in the
  // future. It is not required by man 2 sigaltstack now (they're using
  // malloc()).
  void* base = MmapOrDie(kAltStackSize, __func__);
  altstack.ss_sp = (char*) base;
  altstack.ss_flags = 0;
  altstack.ss_size = kAltStackSize;
  CHECK_EQ(0, sigaltstack(&altstack, nullptr));
}

void UnsetAlternateSignalStack() {
  stack_t altstack, oldstack;
  altstack.ss_sp = nullptr;
  altstack.ss_flags = SS_DISABLE;
  altstack.ss_size = kAltStackSize;  // Some sane value required on Darwin.
  CHECK_EQ(0, sigaltstack(&altstack, &oldstack));
  UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
}

typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
static void MaybeInstallSigaction(int signum,
                                  SignalHandlerType handler) {
  if (!IsDeadlySignal(signum))
    return;
  struct sigaction sigact;
  internal_memset(&sigact, 0, sizeof(sigact));
  sigact.sa_sigaction = (sa_sigaction_t)handler;
  // Do not block the signal from being received in that signal's handler.
  // Clients are responsible for handling this correctly.
  sigact.sa_flags = SA_SIGINFO | SA_NODEFER;
  if (common_flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
  CHECK_EQ(0, internal_sigaction(signum, &sigact, nullptr));
  VReport(1, "Installed the sigaction for signal %d\n", signum);
}

void InstallDeadlySignalHandlers(SignalHandlerType handler) {
  // Set the alternate signal stack for the main thread.
  // This will cause SetAlternateSignalStack to be called twice, but the stack
  // will actually be set only once.
  if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
  MaybeInstallSigaction(SIGSEGV, handler);
  MaybeInstallSigaction(SIGBUS, handler);
  MaybeInstallSigaction(SIGABRT, handler);
  MaybeInstallSigaction(SIGFPE, handler);
  MaybeInstallSigaction(SIGILL, handler);
}
#endif  // SANITIZER_GO

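// Probe readability of [beg, beg + size) without installing a signal handler:
// a write(2) to a pipe returns EFAULT or a short byte count when part of the
// source buffer is unreadable, so the faulting access never reaches user code.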
bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  uptr page_size = GetPageSizeCached();
  // Checking too large memory ranges is slow.
  CHECK_LT(size, page_size * 10);
  int sock_pair[2];
  if (pipe(sock_pair))
    return false;
  uptr bytes_written =
      internal_write(sock_pair[1], reinterpret_cast<void *>(beg), size);
  int write_errno;
  bool result;
  if (internal_iserror(bytes_written, &write_errno)) {
    CHECK_EQ(EFAULT, write_errno);
    result = false;
  } else {
    result = (bytes_written == size);
  }
  internal_close(sock_pair[0]);
  internal_close(sock_pair[1]);
  return result;
}

void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
  // Some kinds of sandboxes may forbid filesystem access, so we won't be able
  // to read the file mappings from /proc/self/maps. Luckily, the process
  // won't be able to load additional libraries either, so it's fine to use
  // the cached mappings.
  MemoryMappingLayout::CacheMemoryMappings();
  // Same for /proc/self/exe in the symbolizer.
#if !SANITIZER_GO
  Symbolizer::GetOrInit()->PrepareForSandboxing();
  CovPrepareForSandboxing(args);
#endif
}

#if SANITIZER_ANDROID || SANITIZER_GO
int GetNamedMappingFd(const char *name, uptr size) {
  return -1;
}
#else
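// When decorate_proc_maps is set, back the mapping with a named POSIX shared
// memory object so the region appears under a readable name in
// /proc/self/maps. The object is shm_unlink()ed right away; the returned fd
// is the only remaining reference to it.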
int GetNamedMappingFd(const char *name, uptr size) {
  if (!common_flags()->decorate_proc_maps)
    return -1;
  char shmname[200];
  CHECK(internal_strlen(name) < sizeof(shmname) - 10);
  internal_snprintf(shmname, sizeof(shmname), "%zu [%s]", internal_getpid(),
                    name);
  int fd = shm_open(shmname, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU);
  CHECK_GE(fd, 0);
  int res = internal_ftruncate(fd, size);
  CHECK_EQ(0, res);
  res = shm_unlink(shmname);
  CHECK_EQ(0, res);
  return fd;
}
#endif

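// Map size bytes at fixed_addr (rounded down to a page boundary, with size
// rounded up) without reserving swap (MAP_NORESERVE). A failed mmap is
// reported but not fatal; the caller receives the raw mmap result.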
void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
  int fd = name ? GetNamedMappingFd(name, size) : -1;
  unsigned flags = MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE;
  if (fd == -1) flags |= MAP_ANON;

  uptr PageSize = GetPageSizeCached();
  uptr p = internal_mmap((void *)(fixed_addr & ~(PageSize - 1)),
                         RoundUpTo(size, PageSize), PROT_READ | PROT_WRITE,
                         flags, fd, 0);
  int reserrno;
  if (internal_iserror(p, &reserrno))
    Report("ERROR: %s failed to "
           "allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\n",
           SanitizerToolName, size, size, fixed_addr, reserrno);
  IncreaseTotalMmap(size);
  return (void *)p;
}

void *MmapNoAccess(uptr fixed_addr, uptr size, const char *name) {
  int fd = name ? GetNamedMappingFd(name, size) : -1;
  unsigned flags = MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE;
  if (fd == -1) flags |= MAP_ANON;

  return (void *)internal_mmap((void *)fixed_addr, size, PROT_NONE, flags, fd,
                               0);
}

// This function is defined elsewhere if we intercepted pthread_attr_getstack.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE int
real_pthread_attr_getstack(void *attr, void **addr, size_t *size);
}  // extern "C"

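// Prefer the uninterposed libc implementation when the interceptor exports it
// as real_pthread_attr_getstack, so the query does not go through the
// sanitizer's own interceptor; otherwise fall back to the public symbol.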
int my_pthread_attr_getstack(void *attr, void **addr, uptr *size) {
#if !SANITIZER_GO && !SANITIZER_MAC
  if (&real_pthread_attr_getstack)
    return real_pthread_attr_getstack((pthread_attr_t *)attr, addr,
                                      (size_t *)size);
#endif
  return pthread_attr_getstack((pthread_attr_t *)attr, addr, (size_t *)size);
}

#if !SANITIZER_GO
void AdjustStackSize(void *attr_) {
  pthread_attr_t *attr = (pthread_attr_t *)attr_;
  uptr stackaddr = 0;
  uptr stacksize = 0;
  my_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
  // GLibC will return (0 - stacksize) as the stack address in the case when
  // stacksize is set, but stackaddr is not.
  bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
  // We place a lot of tool data into TLS, account for that.
  const uptr minstacksize = GetTlsSize() + 128*1024;
  if (stacksize < minstacksize) {
    if (!stack_set) {
      if (stacksize != 0) {
        VPrintf(1, "Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
                minstacksize);
        pthread_attr_setstacksize(attr, minstacksize);
      }
    } else {
      Printf("Sanitizer: pre-allocated stack size is insufficient: "
             "%zu < %zu\n", stacksize, minstacksize);
      Printf("Sanitizer: pthread_create is likely to fail.\n");
    }
  }
}
#endif  // !SANITIZER_GO

}  // namespace __sanitizer

#endif  // SANITIZER_POSIX