1 // Copyright 2017 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
#include <string.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>

#include "src/base/macros.h"
#include "src/base/platform/platform-posix-time.h"
#include "src/base/platform/platform-posix.h"
#include "src/base/platform/platform.h"
12
13 namespace v8 {
14 namespace base {
15
16 namespace {
17
GetProtectionFromMemoryPermission(OS::MemoryPermission access)18 uint32_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
19 switch (access) {
20 case OS::MemoryPermission::kNoAccess:
21 return 0; // no permissions
22 case OS::MemoryPermission::kRead:
23 return ZX_VM_FLAG_PERM_READ;
24 case OS::MemoryPermission::kReadWrite:
25 return ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE;
26 case OS::MemoryPermission::kReadWriteExecute:
27 return ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
28 ZX_VM_FLAG_PERM_EXECUTE;
29 case OS::MemoryPermission::kReadExecute:
30 return ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_EXECUTE;
31 }
32 UNREACHABLE();
33 }
34
35 } // namespace
36
CreateTimezoneCache()37 TimezoneCache* OS::CreateTimezoneCache() {
38 return new PosixDefaultTimezoneCache();
39 }
40
41 // static
Allocate(void * address,size_t size,size_t alignment,OS::MemoryPermission access)42 void* OS::Allocate(void* address, size_t size, size_t alignment,
43 OS::MemoryPermission access) {
44 size_t page_size = OS::AllocatePageSize();
45 DCHECK_EQ(0, size % page_size);
46 DCHECK_EQ(0, alignment % page_size);
47 address = AlignedAddress(address, alignment);
48 // Add the maximum misalignment so we are guaranteed an aligned base address.
49 size_t request_size = size + (alignment - page_size);
50
51 zx_handle_t vmo;
52 if (zx_vmo_create(request_size, 0, &vmo) != ZX_OK) {
53 return nullptr;
54 }
55 static const char kVirtualMemoryName[] = "v8-virtualmem";
56 zx_object_set_property(vmo, ZX_PROP_NAME, kVirtualMemoryName,
57 strlen(kVirtualMemoryName));
58 uintptr_t reservation;
59 uint32_t prot = GetProtectionFromMemoryPermission(access);
60 zx_status_t status = zx_vmar_map_old(zx_vmar_root_self(), 0, vmo, 0,
61 request_size, prot, &reservation);
62 // Either the vmo is now referenced by the vmar, or we failed and are bailing,
63 // so close the vmo either way.
64 zx_handle_close(vmo);
65 if (status != ZX_OK) {
66 return nullptr;
67 }
68
69 uint8_t* base = reinterpret_cast<uint8_t*>(reservation);
70 uint8_t* aligned_base = RoundUp(base, alignment);
71
72 // Unmap extra memory reserved before and after the desired block.
73 if (aligned_base != base) {
74 DCHECK_LT(base, aligned_base);
75 size_t prefix_size = static_cast<size_t>(aligned_base - base);
76 zx_vmar_unmap(zx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
77 prefix_size);
78 request_size -= prefix_size;
79 }
80
81 size_t aligned_size = RoundUp(size, page_size);
82
83 if (aligned_size != request_size) {
84 DCHECK_LT(aligned_size, request_size);
85 size_t suffix_size = request_size - aligned_size;
86 zx_vmar_unmap(zx_vmar_root_self(),
87 reinterpret_cast<uintptr_t>(aligned_base + aligned_size),
88 suffix_size);
89 request_size -= suffix_size;
90 }
91
92 DCHECK(aligned_size == request_size);
93 return static_cast<void*>(aligned_base);
94 }
95
96 // static
Free(void * address,const size_t size)97 bool OS::Free(void* address, const size_t size) {
98 DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
99 DCHECK_EQ(0, size % AllocatePageSize());
100 return zx_vmar_unmap(zx_vmar_root_self(),
101 reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
102 }
103
104 // static
Release(void * address,size_t size)105 bool OS::Release(void* address, size_t size) {
106 DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
107 DCHECK_EQ(0, size % CommitPageSize());
108 return zx_vmar_unmap(zx_vmar_root_self(),
109 reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
110 }
111
112 // static
SetPermissions(void * address,size_t size,MemoryPermission access)113 bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
114 DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
115 DCHECK_EQ(0, size % CommitPageSize());
116 uint32_t prot = GetProtectionFromMemoryPermission(access);
117 return zx_vmar_protect_old(zx_vmar_root_self(),
118 reinterpret_cast<uintptr_t>(address), size,
119 prot) == ZX_OK;
120 }
121
122 // static
HasLazyCommits()123 bool OS::HasLazyCommits() {
124 // TODO(scottmg): Port, https://crbug.com/731217.
125 return false;
126 }
127
GetSharedLibraryAddresses()128 std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
129 UNREACHABLE(); // TODO(scottmg): Port, https://crbug.com/731217.
130 }
131
SignalCodeMovingGC()132 void OS::SignalCodeMovingGC() {
133 UNREACHABLE(); // TODO(scottmg): Port, https://crbug.com/731217.
134 }
135
GetUserTime(uint32_t * secs,uint32_t * usecs)136 int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
137 const auto kNanosPerMicrosecond = 1000ULL;
138 const auto kMicrosPerSecond = 1000000ULL;
139 const zx_time_t nanos_since_thread_started = zx_clock_get(ZX_CLOCK_THREAD);
140
141 // First convert to microseconds, rounding up.
142 const uint64_t micros_since_thread_started =
143 (nanos_since_thread_started + kNanosPerMicrosecond - 1ULL) /
144 kNanosPerMicrosecond;
145
146 *secs = static_cast<uint32_t>(micros_since_thread_started / kMicrosPerSecond);
147 *usecs =
148 static_cast<uint32_t>(micros_since_thread_started % kMicrosPerSecond);
149 return 0;
150 }
151
152 } // namespace base
153 } // namespace v8
154