1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 // Platform-specific code for POSIX goes here. This is not a platform on its
6 // own, but contains the parts which are the same across the POSIX platforms
7 // Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX.
8
9 #include <errno.h>
10 #include <limits.h>
11 #include <pthread.h>
12 #if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
13 #include <pthread_np.h> // for pthread_set_name_np
14 #endif
15 #include <sched.h> // for sched_yield
16 #include <stdio.h>
17 #include <time.h>
18 #include <unistd.h>
19
20 #include <sys/mman.h>
21 #include <sys/stat.h>
22 #include <sys/time.h>
23 #include <sys/types.h>
24 #if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
25 defined(__NetBSD__) || defined(__OpenBSD__)
26 #include <sys/sysctl.h> // NOLINT, for sysctl
27 #endif
28
29 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
30 #define LOG_TAG "v8"
31 #include <android/log.h> // NOLINT
32 #endif
33
34 #include <cmath>
35 #include <cstdlib>
36
37 #include "src/base/platform/platform-posix.h"
38
39 #include "src/base/lazy-instance.h"
40 #include "src/base/macros.h"
41 #include "src/base/platform/platform.h"
42 #include "src/base/platform/time.h"
43 #include "src/base/utils/random-number-generator.h"
44
45 #ifdef V8_FAST_TLS_SUPPORTED
46 #include "src/base/atomicops.h"
47 #endif
48
49 #if V8_OS_MACOSX
50 #include <dlfcn.h>
51 #endif
52
53 #if V8_OS_LINUX
54 #include <sys/prctl.h> // NOLINT, for prctl
55 #endif
56
57 #if defined(V8_OS_FUCHSIA)
58 #include <zircon/process.h>
59 #else
60 #include <sys/resource.h>
61 #endif
62
63 #if !defined(_AIX) && !defined(V8_OS_FUCHSIA)
64 #include <sys/syscall.h>
65 #endif
66
67 #if V8_OS_FREEBSD || V8_OS_MACOSX || V8_OS_OPENBSD || V8_OS_SOLARIS
68 #define MAP_ANONYMOUS MAP_ANON
69 #endif
70
71 #if defined(V8_OS_SOLARIS)
72 #if (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE > 2) || defined(__EXTENSIONS__)
73 extern "C" int madvise(caddr_t, size_t, int);
74 #else
75 extern int madvise(caddr_t, size_t, int);
76 #endif
77 #endif
78
79 #ifndef MADV_FREE
80 #define MADV_FREE MADV_DONTNEED
81 #endif
82
83 namespace v8 {
84 namespace base {
85
86 namespace {
87
// 0 is never a valid thread id.
const pthread_t kNoThread = (pthread_t) 0;

// When true, OS::Abort() traps immediately instead of calling abort(); set
// once in OS::Initialize().
bool g_hard_abort = false;

// File name reported by GetGCFakeMMapFile(); set once in OS::Initialize().
const char* g_gc_fake_mmap = nullptr;

// Process-wide RNG used to derive randomized mmap hint addresses.
// Guarded by rng_mutex.
static LazyInstance<RandomNumberGenerator>::type
    platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER;
98
#if !V8_OS_FUCHSIA
#if V8_OS_MACOSX
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
// defined tag 255 This helps identify V8-allocated regions in memory analysis
// tools like vmmap(1).
const int kMmapFd = VM_MAKE_TAG(255);
#else  // !V8_OS_MACOSX
// Anonymous mappings on other POSIX systems use fd -1.
const int kMmapFd = -1;
#endif  // !V8_OS_MACOSX

// Offset argument for mmap(); always 0 since we only map anonymous memory.
const int kMmapFdOffset = 0;
110
GetProtectionFromMemoryPermission(OS::MemoryPermission access)111 int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
112 switch (access) {
113 case OS::MemoryPermission::kNoAccess:
114 return PROT_NONE;
115 case OS::MemoryPermission::kRead:
116 return PROT_READ;
117 case OS::MemoryPermission::kReadWrite:
118 return PROT_READ | PROT_WRITE;
119 case OS::MemoryPermission::kReadWriteExecute:
120 return PROT_READ | PROT_WRITE | PROT_EXEC;
121 case OS::MemoryPermission::kReadExecute:
122 return PROT_READ | PROT_EXEC;
123 }
124 UNREACHABLE();
125 }
126
// Computes the mmap() flags for a fresh private anonymous mapping.
// kNoAccess mappings are address-space reservations, so where the platform
// allows it we avoid committing swap/backing store for them.
int GetFlagsForMemoryPermission(OS::MemoryPermission access) {
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (access == OS::MemoryPermission::kNoAccess) {
#if !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
    // Do not reserve swap space for this reservation.
    flags |= MAP_NORESERVE;
#endif  // !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
#if V8_OS_QNX
    // QNX commits memory eagerly unless MAP_LAZY is requested.
    flags |= MAP_LAZY;
#endif  // V8_OS_QNX
  }
  return flags;
}
139
Allocate(void * address,size_t size,OS::MemoryPermission access)140 void* Allocate(void* address, size_t size, OS::MemoryPermission access) {
141 int prot = GetProtectionFromMemoryPermission(access);
142 int flags = GetFlagsForMemoryPermission(access);
143 void* result = mmap(address, size, prot, flags, kMmapFd, kMmapFdOffset);
144 if (result == MAP_FAILED) return nullptr;
145 return result;
146 }
147
// Advises the kernel that [address, address+size) is no longer needed so its
// pages may be reclaimed, without unmapping the range. Returns the final
// madvise() result: 0 on success (or when madvise is unavailable), -1 on
// failure.
int ReclaimInaccessibleMemory(void* address, size_t size) {
// NOTE: this previously tested defined(OS_MACOSX), which is never defined in
// V8 (the file consistently uses V8_OS_MACOSX), so macOS silently fell
// through to the generic MADV_FREE path.
#if defined(V8_OS_MACOSX)
  // On OSX, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but also
  // marks the pages with the reusable bit, which allows both Activity Monitor
  // and memory-infra to correctly track the pages.
  int ret = madvise(address, size, MADV_FREE_REUSABLE);
#elif defined(_AIX) || defined(V8_OS_SOLARIS)
  int ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_FREE);
#else
  int ret = madvise(address, size, MADV_FREE);
#endif
  if (ret != 0 && errno == ENOSYS)
    return 0;  // madvise is not available on all systems.
  if (ret != 0 && errno == EINVAL) {
    // MADV_FREE only works on Linux 4.5+. If the request failed, retry with
    // the older MADV_DONTNEED. Note that MADV_FREE being defined at compile
    // time doesn't imply runtime support.
#if defined(_AIX) || defined(V8_OS_SOLARIS)
    ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_DONTNEED);
#else
    ret = madvise(address, size, MADV_DONTNEED);
#endif
  }
  return ret;
}
173
174 #endif // !V8_OS_FUCHSIA
175
176 } // namespace
177
Initialize(bool hard_abort,const char * const gc_fake_mmap)178 void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
179 g_hard_abort = hard_abort;
180 g_gc_fake_mmap = gc_fake_mmap;
181 }
182
// Returns the stack-frame alignment (in bytes) required when calling from
// generated code into the runtime on this target architecture.
int OS::ActivationFrameAlignment() {
#if V8_TARGET_ARCH_ARM
  // On EABI ARM targets this is required for fp correctness in the
  // runtime system.
  return 8;
#elif V8_TARGET_ARCH_MIPS
  return 8;
#elif V8_TARGET_ARCH_S390
  return 8;
#else
  // Otherwise we just assume 16 byte alignment, i.e.:
  // - With gcc 4.4 the tree vectorization optimizer can generate code
  //   that requires 16 byte alignment such as movdqa on x86.
  // - Mac OS X, PPC and Solaris (64-bit) activation frames must
  //   be 16 byte-aligned;  see "Mac OS X ABI Function Call Guide"
  return 16;
#endif
}
201
202 // static
AllocatePageSize()203 size_t OS::AllocatePageSize() {
204 return static_cast<size_t>(sysconf(_SC_PAGESIZE));
205 }
206
207 // static
CommitPageSize()208 size_t OS::CommitPageSize() {
209 static size_t page_size = getpagesize();
210 return page_size;
211 }
212
213 // static
SetRandomMmapSeed(int64_t seed)214 void OS::SetRandomMmapSeed(int64_t seed) {
215 if (seed) {
216 LockGuard<Mutex> guard(rng_mutex.Pointer());
217 platform_random_number_generator.Pointer()->SetSeed(seed);
218 }
219 }
220
221 // static
// Produces a randomized hint address for mmap(). Callers pass this without
// MAP_FIXED, so the kernel is free to ignore it; the per-architecture masks
// below keep the hint inside the target's usable virtual address range.
void* OS::GetRandomMmapAddr() {
  uintptr_t raw_addr;
  {
    // The RNG is process-global; serialize access to it.
    LockGuard<Mutex> guard(rng_mutex.Pointer());
    platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
                                                          sizeof(raw_addr));
  }
#if defined(V8_USE_ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
    defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER)
  // If random hint addresses interfere with address ranges hard coded in
  // sanitizers, bad things happen. This address range is copied from TSAN
  // source but works with all tools.
  // See crbug.com/539863.
  raw_addr &= 0x007fffff0000ULL;
  raw_addr += 0x7e8000000000ULL;
#else
#if V8_TARGET_ARCH_X64
  // Currently available CPUs have 48 bits of virtual addressing. Truncate
  // the hint address to 46 bits to give the kernel a fighting chance of
  // fulfilling our placement request.
  raw_addr &= uint64_t{0x3FFFFFFFF000};
#elif V8_TARGET_ARCH_PPC64
#if V8_OS_AIX
  // AIX: 64 bits of virtual addressing, but we limit address range to:
  //   a) minimize Segment Lookaside Buffer (SLB) misses and
  raw_addr &= uint64_t{0x3FFFF000};
  // Use extra address space to isolate the mmap regions.
  raw_addr += uint64_t{0x400000000000};
#elif V8_TARGET_BIG_ENDIAN
  // Big-endian Linux: 42 bits of virtual addressing.
  raw_addr &= uint64_t{0x03FFFFFFF000};
#else
  // Little-endian Linux: 46 bits of virtual addressing.
  raw_addr &= uint64_t{0x3FFFFFFF0000};
#endif
#elif V8_TARGET_ARCH_MIPS64
  // We allocate code in 256 MB aligned segments because of optimizations using
  // J instruction that require that all code is within a single 256 MB segment
  raw_addr &= uint64_t{0x3FFFE0000000};
#elif V8_TARGET_ARCH_S390X
  // Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
  // of virtual addressing. Truncate to 40 bits to allow kernel chance to
  // fulfill request.
  raw_addr &= uint64_t{0xFFFFFFF000};
#elif V8_TARGET_ARCH_S390
  // 31 bits of virtual addressing. Truncate to 29 bits to allow kernel chance
  // to fulfill request.
  raw_addr &= 0x1FFFF000;
#else
  // Generic 32-bit target: keep the hint page-aligned and below 1 GB before
  // adding the per-OS base offset below.
  raw_addr &= 0x3FFFF000;

#ifdef __sun
  // For our Solaris/illumos mmap hint, we pick a random address in the bottom
  // half of the top half of the address space (that is, the third quarter).
  // Because we do not MAP_FIXED, this will be treated only as a hint -- the
  // system will not fail to mmap() because something else happens to already
  // be mapped at our random address. We deliberately set the hint high enough
  // to get well above the system's break (that is, the heap); Solaris and
  // illumos will try the hint and if that fails allocate as if there were
  // no hint at all. The high hint prevents the break from getting hemmed in
  // at low values, ceding half of the address space to the system heap.
  raw_addr += 0x80000000;
#elif V8_OS_AIX
  // The range 0x30000000 - 0xD0000000 is available on AIX;
  // choose the upper range.
  raw_addr += 0x90000000;
#else
  // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
  // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
  // 10.6 and 10.7.
  raw_addr += 0x20000000;
#endif
#endif
#endif
  return reinterpret_cast<void*>(raw_addr);
}
298
299 // TODO(bbudge) Move Cygwin and Fuschia stuff into platform-specific files.
300 #if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
301 // static
// Allocates `size` bytes at an address aligned to `alignment` by
// over-allocating (size + alignment - page_size), then unmapping the
// misaligned head and tail of the oversized mapping. Returns nullptr on
// failure.
void* OS::Allocate(void* address, size_t size, size_t alignment,
                   MemoryPermission access) {
  size_t page_size = AllocatePageSize();
  DCHECK_EQ(0, size % page_size);
  DCHECK_EQ(0, alignment % page_size);
  address = AlignedAddress(address, alignment);
  // Add the maximum misalignment so we are guaranteed an aligned base address.
  size_t request_size = size + (alignment - page_size);
  request_size = RoundUp(request_size, OS::AllocatePageSize());
  void* result = base::Allocate(address, request_size, access);
  if (result == nullptr) return nullptr;

  // Unmap memory allocated before the aligned base address.
  uint8_t* base = static_cast<uint8_t*>(result);
  uint8_t* aligned_base = RoundUp(base, alignment);
  if (aligned_base != base) {
    DCHECK_LT(base, aligned_base);
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    CHECK(Free(base, prefix_size));
    request_size -= prefix_size;
  }
  // Unmap memory allocated after the potentially unaligned end.
  if (size != request_size) {
    DCHECK_LT(size, request_size);
    size_t suffix_size = request_size - size;
    CHECK(Free(aligned_base + size, suffix_size));
    request_size -= suffix_size;
  }

  // After trimming, exactly `size` bytes remain, starting at aligned_base.
  DCHECK_EQ(size, request_size);
  return static_cast<void*>(aligned_base);
}
334
335 // static
Free(void * address,const size_t size)336 bool OS::Free(void* address, const size_t size) {
337 DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
338 DCHECK_EQ(0, size % AllocatePageSize());
339 return munmap(address, size) == 0;
340 }
341
342 // static
Release(void * address,size_t size)343 bool OS::Release(void* address, size_t size) {
344 DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
345 DCHECK_EQ(0, size % CommitPageSize());
346 return munmap(address, size) == 0;
347 }
348
349 // static
SetPermissions(void * address,size_t size,MemoryPermission access)350 bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
351 DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
352 DCHECK_EQ(0, size % CommitPageSize());
353
354 int prot = GetProtectionFromMemoryPermission(access);
355 int ret = mprotect(address, size, prot);
356 if (ret == 0 && access == OS::MemoryPermission::kNoAccess) {
357 // This is advisory; ignore errors and continue execution.
358 ReclaimInaccessibleMemory(address, size);
359 }
360
361 // For accounting purposes, we want to call MADV_FREE_REUSE on macOS after
362 // changing permissions away from OS::MemoryPermission::kNoAccess. Since this
363 // state is not kept at this layer, we always call this if access != kNoAccess.
364 // The cost is a syscall that effectively no-ops.
365 // TODO(erikchen): Fix this to only call MADV_FREE_REUSE when necessary.
366 // https://crbug.com/823915
367 #if defined(OS_MACOSX)
368 if (access != OS::MemoryPermission::kNoAccess)
369 madvise(address, size, MADV_FREE_REUSE);
370 #endif
371
372 return ret == 0;
373 }
374
375 // static
// Returns true when the OS commits pages lazily on first touch, letting V8
// over-reserve without paying for physical memory up front.
bool OS::HasLazyCommits() {
#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX
  return true;
#else
  // TODO(bbudge) Return true for all POSIX platforms.
  return false;
#endif
}
384 #endif // !V8_OS_CYGWIN && !V8_OS_FUCHSIA
385
GetGCFakeMMapFile()386 const char* OS::GetGCFakeMMapFile() {
387 return g_gc_fake_mmap;
388 }
389
390
Sleep(TimeDelta interval)391 void OS::Sleep(TimeDelta interval) {
392 usleep(static_cast<useconds_t>(interval.InMicroseconds()));
393 }
394
395
Abort()396 void OS::Abort() {
397 if (g_hard_abort) {
398 V8_IMMEDIATE_CRASH();
399 }
400 // Redirect to std abort to signal abnormal program termination.
401 abort();
402 }
403
404
// Emits the host architecture's software-breakpoint instruction, stopping in
// an attached debugger at this point.
void OS::DebugBreak() {
#if V8_HOST_ARCH_ARM
  asm("bkpt 0");
#elif V8_HOST_ARCH_ARM64
  asm("brk 0");
#elif V8_HOST_ARCH_MIPS
  asm("break");
#elif V8_HOST_ARCH_MIPS64
  asm("break");
#elif V8_HOST_ARCH_PPC
  asm("twge 2,2");
#elif V8_HOST_ARCH_IA32
  asm("int $3");
#elif V8_HOST_ARCH_X64
  asm("int $3");
#elif V8_HOST_ARCH_S390
  // Software breakpoint instruction is 0x0001
  asm volatile(".word 0x0001");
#else
#error Unsupported host architecture.
#endif
}
427
428
// Pairs a stdio FILE* with the mmap()ed view of its contents; the mapping is
// unmapped and the file closed by the destructor (defined below).
class PosixMemoryMappedFile final : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, size_t size)
      : file_(file), memory_(memory), size_(size) {}
  ~PosixMemoryMappedFile() final;
  void* memory() const final { return memory_; }
  size_t size() const final { return size_; }

 private:
  FILE* const file_;    // Owned; closed in the destructor.
  void* const memory_;  // Owned mapping; unmapped in the destructor.
  size_t const size_;   // Size of the mapping in bytes.
};
442
443
444 // static
open(const char * name)445 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
446 if (FILE* file = fopen(name, "r+")) {
447 if (fseek(file, 0, SEEK_END) == 0) {
448 long size = ftell(file); // NOLINT(runtime/int)
449 if (size >= 0) {
450 void* const memory =
451 mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_WRITE,
452 MAP_SHARED, fileno(file), 0);
453 if (memory != MAP_FAILED) {
454 return new PosixMemoryMappedFile(file, memory, size);
455 }
456 }
457 }
458 fclose(file);
459 }
460 return nullptr;
461 }
462
463
464 // static
create(const char * name,size_t size,void * initial)465 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
466 size_t size, void* initial) {
467 if (FILE* file = fopen(name, "w+")) {
468 size_t result = fwrite(initial, 1, size, file);
469 if (result == size && !ferror(file)) {
470 void* memory = mmap(OS::GetRandomMmapAddr(), result,
471 PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
472 if (memory != MAP_FAILED) {
473 return new PosixMemoryMappedFile(file, memory, result);
474 }
475 }
476 fclose(file);
477 }
478 return nullptr;
479 }
480
481
~PosixMemoryMappedFile()482 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
483 if (memory_) CHECK(OS::Free(memory_, size_));
484 fclose(file_);
485 }
486
487
GetCurrentProcessId()488 int OS::GetCurrentProcessId() {
489 return static_cast<int>(getpid());
490 }
491
492
// Returns an integer identifier for the calling thread, using whichever
// native thread-id API this platform provides.
int OS::GetCurrentThreadId() {
#if V8_OS_MACOSX || (V8_OS_ANDROID && defined(__APPLE__))
  return static_cast<int>(pthread_mach_thread_np(pthread_self()));
#elif V8_OS_LINUX
  // glibc has no gettid() wrapper here; issue the raw syscall.
  return static_cast<int>(syscall(__NR_gettid));
#elif V8_OS_ANDROID
  return static_cast<int>(gettid());
#elif V8_OS_AIX
  return static_cast<int>(thread_self());
#elif V8_OS_FUCHSIA
  return static_cast<int>(zx_thread_self());
#elif V8_OS_SOLARIS
  return static_cast<int>(pthread_self());
#else
  // Fallback: pthread_t may be a pointer type, so round-trip via intptr_t.
  return static_cast<int>(reinterpret_cast<intptr_t>(pthread_self()));
#endif
}
510
ExitProcess(int exit_code)511 void OS::ExitProcess(int exit_code) {
512 // Use _exit instead of exit to avoid races between isolate
513 // threads and static destructors.
514 fflush(stdout);
515 fflush(stderr);
516 _exit(exit_code);
517 }
518
519 // ----------------------------------------------------------------------------
520 // POSIX date/time support.
521 //
522
523 #if !defined(V8_OS_FUCHSIA)
GetUserTime(uint32_t * secs,uint32_t * usecs)524 int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
525 struct rusage usage;
526
527 if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
528 *secs = static_cast<uint32_t>(usage.ru_utime.tv_sec);
529 *usecs = static_cast<uint32_t>(usage.ru_utime.tv_usec);
530 return 0;
531 }
532 #endif
533
// Current wall-clock time in milliseconds since the epoch, in the format
// used by JavaScript Date.
double OS::TimeCurrentMillis() {
  return Time::Now().ToJsTime();
}
537
DaylightSavingsOffset(double time)538 double PosixTimezoneCache::DaylightSavingsOffset(double time) {
539 if (std::isnan(time)) return std::numeric_limits<double>::quiet_NaN();
540 time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
541 struct tm tm;
542 struct tm* t = localtime_r(&tv, &tm);
543 if (nullptr == t) return std::numeric_limits<double>::quiet_NaN();
544 return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
545 }
546
547
// Returns the calling thread's errno value.
int OS::GetLastError() {
  return errno;
}
551
552
553 // ----------------------------------------------------------------------------
554 // POSIX stdio support.
555 //
556
FOpen(const char * path,const char * mode)557 FILE* OS::FOpen(const char* path, const char* mode) {
558 FILE* file = fopen(path, mode);
559 if (file == nullptr) return nullptr;
560 struct stat file_stat;
561 if (fstat(fileno(file), &file_stat) != 0) {
562 fclose(file);
563 return nullptr;
564 }
565 bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
566 if (is_regular_file) return file;
567 fclose(file);
568 return nullptr;
569 }
570
571
// Deletes the file (or empty directory) at `path`; true on success.
bool OS::Remove(const char* path) {
  return (remove(path) == 0);
}

// POSIX paths use a single separator character.
char OS::DirectorySeparator() { return '/'; }

bool OS::isDirectorySeparator(const char ch) {
  return ch == DirectorySeparator();
}


// Returns an unnamed temporary file that is deleted automatically on close.
FILE* OS::OpenTemporaryFile() {
  return tmpfile();
}


// Mode used when opening the V8 log file ("w" truncates existing contents).
const char* const OS::LogFileOpenMode = "w";
589
590
// printf-style output to stdout (or the Android log, see VPrint).
void OS::Print(const char* format, ...) {
  va_list args;
  va_start(args, format);
  VPrint(format, args);
  va_end(args);
}


// va_list variant of Print(). On Android (unless V8_ANDROID_LOG_STDOUT is
// set) output goes to the system log under the "v8" tag instead of stdout.
void OS::VPrint(const char* format, va_list args) {
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
#else
  vprintf(format, args);
#endif
}


// printf-style output to an arbitrary stream.
void OS::FPrint(FILE* out, const char* format, ...) {
  va_list args;
  va_start(args, format);
  VFPrint(out, format, args);
  va_end(args);
}


// va_list variant of FPrint(). Note: on Android the `out` stream is ignored
// and the message goes to the system log.
void OS::VFPrint(FILE* out, const char* format, va_list args) {
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
#else
  vfprintf(out, format, args);
#endif
}


// printf-style output to stderr (or the Android error log).
void OS::PrintError(const char* format, ...) {
  va_list args;
  va_start(args, format);
  VPrintError(format, args);
  va_end(args);
}


// va_list variant of PrintError().
void OS::VPrintError(const char* format, va_list args) {
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
  __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
#else
  vfprintf(stderr, format, args);
#endif
}


// snprintf-style formatting with V8 semantics; see VSNPrintF for the
// truncation contract.
int OS::SNPrintF(char* str, int length, const char* format, ...) {
  va_list args;
  va_start(args, format);
  int result = VSNPrintF(str, length, format, args);
  va_end(args);
  return result;
}
649
650
VSNPrintF(char * str,int length,const char * format,va_list args)651 int OS::VSNPrintF(char* str,
652 int length,
653 const char* format,
654 va_list args) {
655 int n = vsnprintf(str, length, format, args);
656 if (n < 0 || n >= length) {
657 // If the length is zero, the assignment fails.
658 if (length > 0)
659 str[length - 1] = '\0';
660 return -1;
661 } else {
662 return n;
663 }
664 }
665
666
667 // ----------------------------------------------------------------------------
668 // POSIX string support.
669 //
670
// Thin wrapper over strchr(); returns a pointer into `str` or nullptr.
char* OS::StrChr(char* str, int c) {
  return strchr(str, c);
}


// NOTE(review): copies at most `n` bytes and ignores `length`; the result is
// NOT NUL-terminated when strlen(src) >= n, exactly like raw strncpy() --
// callers are responsible for termination.
void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
  strncpy(dest, src, n);
}
679
680
681 // ----------------------------------------------------------------------------
682 // POSIX thread support.
683 //
684
// POSIX-specific per-Thread state.
class Thread::PlatformData {
 public:
  PlatformData() : thread_(kNoThread) {}
  pthread_t thread_;  // Thread handle for pthread.
  // Synchronizes thread creation
  Mutex thread_creation_mutex_;
};

// Creates an unstarted thread. A nonzero requested stack size smaller than
// the platform minimum is clamped up so pthread_attr_setstacksize() cannot
// fail later in Start().
Thread::Thread(const Options& options)
    : data_(new PlatformData),
      stack_size_(options.stack_size()),
      start_semaphore_(nullptr) {
  if (stack_size_ > 0 && static_cast<size_t>(stack_size_) < PTHREAD_STACK_MIN) {
    stack_size_ = PTHREAD_STACK_MIN;
  }
  set_name(options.name());
}


Thread::~Thread() {
  delete data_;
}
707
708
// Applies `name` to the calling thread using whichever thread-naming API the
// platform provides; silently a no-op where none is available.
static void SetThreadName(const char* name) {
#if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD
  pthread_set_name_np(pthread_self(), name);
#elif V8_OS_NETBSD
  STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
  pthread_setname_np(pthread_self(), "%s", name);
#elif V8_OS_MACOSX
  // pthread_setname_np is only available in 10.6 or later, so test
  // for it at runtime.
  int (*dynamic_pthread_setname_np)(const char*);
  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
      dlsym(RTLD_DEFAULT, "pthread_setname_np");
  if (dynamic_pthread_setname_np == nullptr) return;

  // Mac OS X does not expose the length limit of the name, so hardcode it.
  static const int kMaxNameLength = 63;
  STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
  dynamic_pthread_setname_np(name);
#elif defined(PR_SET_NAME)
  prctl(PR_SET_NAME,
        reinterpret_cast<unsigned long>(name),  // NOLINT
        0, 0, 0);
#endif
}


// Trampoline passed to pthread_create(); runs on the new thread.
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // We take the lock here to make sure that pthread_create finished first since
  // we don't know which thread will run first (the original thread or the new
  // one).
  { LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); }
  SetThreadName(thread->name());
  DCHECK_NE(thread->data()->thread_, kNoThread);
  thread->NotifyStartedAndRun();
  return nullptr;
}


// Copies `name` into the fixed-size name_ buffer, always NUL-terminating.
void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}
752
753
// Starts the thread. The creation mutex is held across pthread_create() so
// that ThreadEntry cannot run before data_->thread_ has been written (it
// briefly acquires the same mutex before touching that field).
void Thread::Start() {
  int result;
  pthread_attr_t attr;
  memset(&attr, 0, sizeof(attr));
  result = pthread_attr_init(&attr);
  DCHECK_EQ(0, result);
  size_t stack_size = stack_size_;
  if (stack_size == 0) {
#if V8_OS_MACOSX
    // Default on Mac OS X is 512kB -- bump up to 1MB
    stack_size = 1 * 1024 * 1024;
#elif V8_OS_AIX
    // Default on AIX is 96kB -- bump up to 2MB
    stack_size = 2 * 1024 * 1024;
#endif
  }
  if (stack_size > 0) {
    result = pthread_attr_setstacksize(&attr, stack_size);
    DCHECK_EQ(0, result);
  }
  {
    LockGuard<Mutex> lock_guard(&data_->thread_creation_mutex_);
    result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
  }
  DCHECK_EQ(0, result);
  result = pthread_attr_destroy(&attr);
  DCHECK_EQ(0, result);
  DCHECK_NE(data_->thread_, kNoThread);
  USE(result);
}

// Blocks until the thread's entry function has returned.
void Thread::Join() { pthread_join(data_->thread_, nullptr); }
786
// Converts a pthread_key_t into V8's integer LocalStorageKey.
static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
#if V8_OS_CYGWIN
  // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
  // because pthread_key_t is a pointer type on Cygwin. This will probably not
  // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
  intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
  return static_cast<Thread::LocalStorageKey>(ptr_key);
#else
  return static_cast<Thread::LocalStorageKey>(pthread_key);
#endif
}


// Inverse of PthreadKeyToLocalKey().
static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
#if V8_OS_CYGWIN
  // Same two-step cast as above, in reverse (see PthreadKeyToLocalKey).
  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
  intptr_t ptr_key = static_cast<intptr_t>(local_key);
  return reinterpret_cast<pthread_key_t>(ptr_key);
#else
  return static_cast<pthread_key_t>(local_key);
#endif
}
810
811
#ifdef V8_FAST_TLS_SUPPORTED

// Set (via Release_Store in InitializeTlsBaseOffset) once initialization has
// run; checked in CreateThreadLocalKey.
static Atomic32 tls_base_offset_initialized = 0;
// Offset of the TLS slot array inside the pthread structure on macOS,
// consumed by the fast inline TLS accessors; depends on the kernel version.
intptr_t kMacTlsBaseOffset = 0;
816
817 // It's safe to do the initialization more that once, but it has to be
818 // done at least once.
InitializeTlsBaseOffset()819 static void InitializeTlsBaseOffset() {
820 const size_t kBufferSize = 128;
821 char buffer[kBufferSize];
822 size_t buffer_size = kBufferSize;
823 int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
824 if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
825 FATAL("V8 failed to get kernel version");
826 }
827 // The buffer now contains a string of the form XX.YY.ZZ, where
828 // XX is the major kernel version component.
829 // Make sure the buffer is 0-terminated.
830 buffer[kBufferSize - 1] = '\0';
831 char* period_pos = strchr(buffer, '.');
832 *period_pos = '\0';
833 int kernel_version_major =
834 static_cast<int>(strtol(buffer, nullptr, 10)); // NOLINT
835 // The constants below are taken from pthreads.s from the XNU kernel
836 // sources archive at www.opensource.apple.com.
837 if (kernel_version_major < 11) {
838 // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
839 // same offsets.
840 #if V8_HOST_ARCH_IA32
841 kMacTlsBaseOffset = 0x48;
842 #else
843 kMacTlsBaseOffset = 0x60;
844 #endif
845 } else {
846 // 11.x.x (Lion) changed the offset.
847 kMacTlsBaseOffset = 0;
848 }
849
850 Release_Store(&tls_base_offset_initialized, 1);
851 }
852
853
// Smoke-tests the fast TLS path: a value stored through SetThreadLocal must
// be readable via GetExistingThreadLocal (the fast accessor), otherwise we
// abort rather than run with broken TLS.
static void CheckFastTls(Thread::LocalStorageKey key) {
  void* expected = reinterpret_cast<void*>(0x1234CAFE);
  Thread::SetThreadLocal(key, expected);
  void* actual = Thread::GetExistingThreadLocal(key);
  if (expected != actual) {
    FATAL("V8 failed to initialize fast TLS on current kernel");
  }
  Thread::SetThreadLocal(key, nullptr);
}

#endif  // V8_FAST_TLS_SUPPORTED
865
866
// Allocates a new thread-local storage key. On the fast-TLS (macOS) path the
// first call also computes the TLS base offset and validates the fast
// accessor before returning.
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
#ifdef V8_FAST_TLS_SUPPORTED
  bool check_fast_tls = false;
  if (tls_base_offset_initialized == 0) {
    check_fast_tls = true;
    InitializeTlsBaseOffset();
  }
#endif
  pthread_key_t key;
  int result = pthread_key_create(&key, nullptr);
  DCHECK_EQ(0, result);
  USE(result);
  LocalStorageKey local_key = PthreadKeyToLocalKey(key);
#ifdef V8_FAST_TLS_SUPPORTED
  // If we just initialized fast TLS support, make sure it works.
  if (check_fast_tls) CheckFastTls(local_key);
#endif
  return local_key;
}
886
887
DeleteThreadLocalKey(LocalStorageKey key)888 void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
889 pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
890 int result = pthread_key_delete(pthread_key);
891 DCHECK_EQ(0, result);
892 USE(result);
893 }
894
895
GetThreadLocal(LocalStorageKey key)896 void* Thread::GetThreadLocal(LocalStorageKey key) {
897 pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
898 return pthread_getspecific(pthread_key);
899 }
900
901
SetThreadLocal(LocalStorageKey key,void * value)902 void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
903 pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
904 int result = pthread_setspecific(pthread_key, value);
905 DCHECK_EQ(0, result);
906 USE(result);
907 }
908
909 #undef LOG_TAG
910 #undef MAP_ANONYMOUS
911 #undef MADV_FREE
912
913 } // namespace base
914 } // namespace v8
915