// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Platform-specific code for Linux goes here. For the POSIX-compatible
// parts, the implementation is in platform-posix.cc.

#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>
#include <sys/time.h>

// Ubuntu Dapper requires memory pages to be marked as
// executable. Otherwise, the OS raises an exception when executing code
// in that page.
#include <errno.h>
#include <fcntl.h>      // open
#include <stdarg.h>
#include <strings.h>    // index
#include <sys/mman.h>   // mmap & munmap
#include <sys/stat.h>   // open
#include <sys/types.h>  // mmap & munmap
#include <unistd.h>     // sysconf

// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
    (defined(__arm__) || defined(__aarch64__)) && \
    !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>  // NOLINT
#endif

#if defined(LEAK_SANITIZER)
#include <sanitizer/lsan_interface.h>
#endif

#include <cmath>

#undef MAP_TYPE

#include "src/base/macros.h"
#include "src/base/platform/platform.h"

#if V8_OS_NACL
#if !defined(MAP_NORESERVE)
// PNaCL doesn't have this, so we always grab all of the memory, which is bad.
#define MAP_NORESERVE 0
#endif
#else
#include <sys/prctl.h>
#include <sys/syscall.h>
#endif

namespace v8 {
namespace base {


#ifdef __arm__

bool OS::ArmUsingHardFloat() {
  // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
  // the Floating Point ABI used (PCS stands for Procedure Call Standard).
  // We use these as well as a couple of other defines to statically determine
  // which FP ABI is used.
  // GCC versions 4.4 and below don't support hard-fp.
  // GCC version 4.5 may support hard-fp without defining __ARM_PCS or
  // __ARM_PCS_VFP.

#define GCC_VERSION (__GNUC__ * 10000                                          \
                     + __GNUC_MINOR__ * 100                                    \
                     + __GNUC_PATCHLEVEL__)
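// For example, GCC 4.6.3 yields GCC_VERSION == 40603.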
#if GCC_VERSION >= 40600
#if defined(__ARM_PCS_VFP)
  return true;
#else
  return false;
#endif

#elif GCC_VERSION < 40500
  return false;

#else
#if defined(__ARM_PCS_VFP)
  return true;
#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
      !defined(__VFP_FP__)
  return false;
#else
#error "Your version of GCC does not report the FP ABI compiled for."          \
       "Please report it on this issue"                                        \
       "http://code.google.com/p/v8/issues/detail?id=2140"

#endif
#endif
#undef GCC_VERSION
}

#endif  // def __arm__


const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
#if V8_OS_NACL
  // Missing support for tm_zone field.
  return "";
#else
  if (std::isnan(time)) return "";
  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
  struct tm* t = localtime(&tv);  // NOLINT(runtime/threadsafe_fn)
  if (!t || !t->tm_zone) return "";
  return t->tm_zone;
#endif
}


double OS::LocalTimeOffset(TimezoneCache* cache) {
#if V8_OS_NACL
  // Missing support for tm_zone field.
  return 0;
#else
  time_t tv = time(NULL);
  struct tm* t = localtime(&tv);  // NOLINT(runtime/threadsafe_fn)
  // tm_gmtoff includes any daylight savings offset, so subtract it.
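  // For example, during Central European Summer Time tm_gmtoff is 7200 and
  // tm_isdst > 0, so this evaluates to (7200 - 3600) * msPerSecond, i.e. the
  // standard (non-DST) offset of one hour east of UTC.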
  return static_cast<double>(t->tm_gmtoff * msPerSecond -
                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
#endif
}


void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, AllocateAlignment());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* addr = OS::GetRandomMmapAddr();
  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mbase == MAP_FAILED) return NULL;
  *allocated = msize;
  return mbase;
}


std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  std::vector<SharedLibraryAddress> result;
  // This function assumes that the layout of the file is as follows:
  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
  // If we encounter an unexpected situation we abort scanning further entries.
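  // A typical entry in /proc/self/maps looks like, e.g.:
  //   00400000-00452000 r-xp 00000000 08:02 173521  /usr/bin/dbus-daemon
  // Only the address range, the permission bits and the trailing name are
  // used below.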
  FILE* fp = fopen("/proc/self/maps", "r");
  if (fp == NULL) return result;

  // Allocate enough room to be able to store a full file name.
  const int kLibNameLen = FILENAME_MAX + 1;
  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));

  // This loop will terminate once the scanning hits an EOF.
  while (true) {
    uintptr_t start, end;
    char attr_r, attr_w, attr_x, attr_p;
    // Parse the addresses and permission bits at the beginning of the line.
    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;

    int c;
    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
      // Found a read-only executable entry. Skip characters until we reach
      // the beginning of the filename or the end of the line.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n') && (c != '/') && (c != '['));
      if (c == EOF) break;  // EOF: Was unexpected, just exit.

      // Process the filename if found.
      if ((c == '/') || (c == '[')) {
        // Push the '/' or '[' back into the stream to be read below.
        ungetc(c, fp);

        // Read to the end of the line. Exit if the read fails.
        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;

        // Drop the newline character read by fgets. We do not need to check
        // for a zero-length string because we know that we at least read the
        // '/' or '[' character.
        lib_name[strlen(lib_name) - 1] = '\0';
      } else {
        // No library name found, just record the raw address range.
        snprintf(lib_name, kLibNameLen,
                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
      }
      result.push_back(SharedLibraryAddress(lib_name, start, end));
    } else {
      // Entry not describing executable data. Skip to end of line to set up
      // reading the next entry.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n'));
      if (c == EOF) break;
    }
  }
  free(lib_name);
  fclose(fp);
  return result;
}


void OS::SignalCodeMovingGC() {
  // Support for ll_prof.py.
  //
  // The Linux profiler built into the kernel logs all mmap's with
  // PROT_EXEC so that analysis tools can properly attribute ticks. We
  // do a mmap with a name known by ll_prof.py and immediately munmap
  // it. This injects a GC marker into the stream of events generated
  // by the kernel and allows us to synchronize V8 code log and the
  // kernel log.
  long size = sysconf(_SC_PAGESIZE);  // NOLINT(runtime/int)
  FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+");
  if (f == NULL) {
    OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
    OS::Abort();
  }
  void* addr = mmap(OS::GetRandomMmapAddr(), size,
#if V8_OS_NACL
                    // The Native Client port of V8 uses an interpreter,
                    // so code pages don't need PROT_EXEC.
                    PROT_READ,
#else
                    PROT_READ | PROT_EXEC,
#endif
                    MAP_PRIVATE, fileno(f), 0);
  DCHECK_NE(MAP_FAILED, addr);
  OS::Free(addr, size);
  fclose(f);
}


// Constants used for mmap.
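// Linux ignores the file descriptor for MAP_ANONYMOUS mappings, but portable
// callers pass -1 and a zero offset, which is what the constants below provide.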
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;


VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }


VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) { }


VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
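  // Over-reserving by |alignment| guarantees that an |alignment|-aligned block
  // of |size| bytes fits somewhere inside the reservation; the unused prefix
  // and suffix are unmapped below. For example, size = 64 KB with
  // alignment = 1 MB requests a reservation of 1 MB + 64 KB.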
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  uint8_t* base = static_cast<uint8_t*>(reservation);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  DCHECK_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  DCHECK(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
#if defined(LEAK_SANITIZER)
  __lsan_register_root_region(address_, size_);
#endif
}
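
// A usage sketch (VirtualMemory is declared in src/base/platform/platform.h;
// the concrete sizes below are purely illustrative):
//   VirtualMemory reservation(1024 * 1024, 1024 * 1024);
//   if (reservation.IsReserved()) {
//     reservation.Commit(reservation.address(), OS::CommitPageSize(), false);
//     // The committed pages are now readable and writable.
//   }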


VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    DCHECK(result);
    USE(result);
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}


void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}


bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  CHECK(InVM(address, size));
  return CommitRegion(address, size, is_executable);
}


bool VirtualMemory::Uncommit(void* address, size_t size) {
  CHECK(InVM(address, size));
  return UncommitRegion(address, size);
}


bool VirtualMemory::Guard(void* address) {
  CHECK(InVM(address, OS::CommitPageSize()));
  OS::Guard(address, OS::CommitPageSize());
  return true;
}


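// Reserves a range of virtual address space without committing any memory:
// PROT_NONE makes the pages inaccessible until they are committed, and
// MAP_NORESERVE asks the kernel not to account swap space for the range.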
void* VirtualMemory::ReserveRegion(size_t size) {
  void* result = mmap(OS::GetRandomMmapAddr(),
                      size,
                      PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                      kMmapFd,
                      kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

#if defined(LEAK_SANITIZER)
  __lsan_register_root_region(result, size);
#endif
  return result;
}


bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
#if V8_OS_NACL
  // The Native Client port of V8 uses an interpreter,
  // so code pages don't need PROT_EXEC.
  int prot = PROT_READ | PROT_WRITE;
#else
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
#endif
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  return true;
}


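// Remaps the range as a fresh PROT_NONE, MAP_NORESERVE mapping. MAP_FIXED
// atomically replaces the committed pages, returning their backing store to
// the kernel while keeping the address range reserved.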
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}


bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
#if defined(LEAK_SANITIZER)
  __lsan_unregister_root_region(base, size);
#endif
  return munmap(base, size) == 0;
}


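// On Linux, committed pages are not backed by physical memory until they are
// first touched, so commits are effectively lazy.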
bool VirtualMemory::HasLazyCommits() {
  return true;
}

}  // namespace base
}  // namespace v8