// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/futex-emulation.h"

#include <limits>

#include "src/base/macros.h"
#include "src/base/platform/time.h"
#include "src/conversions.h"
#include "src/handles-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects/js-array-buffer-inl.h"

namespace v8 {
namespace internal {

using AtomicsWaitEvent = v8::Isolate::AtomicsWaitEvent;

// Process-wide mutex guarding the wait list and the per-node waiting_ /
// interrupted_ flags, and the global list of waiting threads it protects.
// Both are lazily initialized so static-initialization order is irrelevant.
base::LazyMutex FutexEmulation::mutex_ = LAZY_MUTEX_INITIALIZER;
base::LazyInstance<FutexWaitList>::type FutexEmulation::wait_list_ =
    LAZY_INSTANCE_INITIALIZER;

NotifyWake()27 void FutexWaitListNode::NotifyWake() {
28 // Lock the FutexEmulation mutex before notifying. We know that the mutex
29 // will have been unlocked if we are currently waiting on the condition
30 // variable.
31 //
32 // The mutex may also not be locked if the other thread is currently handling
33 // interrupts, or if FutexEmulation::Wait was just called and the mutex
34 // hasn't been locked yet. In either of those cases, we set the interrupted
35 // flag to true, which will be tested after the mutex is re-locked.
36 base::LockGuard<base::Mutex> lock_guard(FutexEmulation::mutex_.Pointer());
37 if (waiting_) {
38 cond_.NotifyOne();
39 interrupted_ = true;
40 }
41 }
42
43
FutexWaitList()44 FutexWaitList::FutexWaitList() : head_(nullptr), tail_(nullptr) {}
45
46
AddNode(FutexWaitListNode * node)47 void FutexWaitList::AddNode(FutexWaitListNode* node) {
48 DCHECK(node->prev_ == nullptr && node->next_ == nullptr);
49 if (tail_) {
50 tail_->next_ = node;
51 } else {
52 head_ = node;
53 }
54
55 node->prev_ = tail_;
56 node->next_ = nullptr;
57 tail_ = node;
58 }
59
60
RemoveNode(FutexWaitListNode * node)61 void FutexWaitList::RemoveNode(FutexWaitListNode* node) {
62 if (node->prev_) {
63 node->prev_->next_ = node->next_;
64 } else {
65 head_ = node->next_;
66 }
67
68 if (node->next_) {
69 node->next_->prev_ = node->prev_;
70 } else {
71 tail_ = node->prev_;
72 }
73
74 node->prev_ = node->next_ = nullptr;
75 }
76
Wake()77 void AtomicsWaitWakeHandle::Wake() {
78 // Adding a separate `NotifyWake()` variant that doesn't acquire the lock
79 // itself would likely just add unnecessary complexity..
80 // The split lock by itself isn’t an issue, as long as the caller properly
81 // synchronizes this with the closing `AtomicsWaitCallback`.
82 {
83 base::LockGuard<base::Mutex> lock_guard(FutexEmulation::mutex_.Pointer());
84 stopped_ = true;
85 }
86 isolate_->futex_wait_list_node()->NotifyWake();
87 }
88
// Blocking half of Atomics.wait: suspends the current thread until the int32
// at |addr| inside |array_buffer| is observed to differ from |value|, the
// relative timeout |rel_timeout_ms| elapses, the thread is woken via
// FutexEmulation::Wake / AtomicsWaitWakeHandle, or an interrupt terminates
// execution. Returns the "ok" / "not-equal" / "timed-out" root sentinel, or
// an exception object.
Object* FutexEmulation::Wait(Isolate* isolate,
                             Handle<JSArrayBuffer> array_buffer, size_t addr,
                             int32_t value, double rel_timeout_ms) {
  DCHECK(addr < NumberToSize(array_buffer->byte_length()));

  void* backing_store = array_buffer->backing_store();
  int32_t* p =
      reinterpret_cast<int32_t*>(static_cast<int8_t*>(backing_store) + addr);

  // Each isolate owns one wait-list node; record what this thread waits on
  // so Wake() can match it by (backing store, address).
  FutexWaitListNode* node = isolate->futex_wait_list_node();
  node->backing_store_ = backing_store;
  node->wait_addr_ = addr;
  node->waiting_ = true;

  bool use_timeout = rel_timeout_ms != V8_INFINITY;

  base::TimeDelta rel_timeout;
  if (use_timeout) {
    // Convert to nanoseconds.
    double rel_timeout_ns = rel_timeout_ms *
                            base::Time::kNanosecondsPerMicrosecond *
                            base::Time::kMicrosecondsPerMillisecond;
    if (rel_timeout_ns >
        static_cast<double>(std::numeric_limits<int64_t>::max())) {
      // 2**63 nanoseconds is 292 years. Let's just treat anything greater as
      // infinite.
      use_timeout = false;
    } else {
      rel_timeout = base::TimeDelta::FromNanoseconds(
          static_cast<int64_t>(rel_timeout_ns));
    }
  }

  // Lets the embedder abort this wait from inside its AtomicsWaitCallback.
  AtomicsWaitWakeHandle stop_handle(isolate);

  isolate->RunAtomicsWaitCallback(AtomicsWaitEvent::kStartWait, array_buffer,
                                  addr, value, rel_timeout_ms, &stop_handle);

  // The callback may itself have scheduled an exception; bail out before
  // ever entering the wait list.
  if (isolate->has_scheduled_exception()) {
    node->waiting_ = false;
    return isolate->PromoteScheduledException();
  }

  Object* result;
  AtomicsWaitEvent callback_result = AtomicsWaitEvent::kWokenUp;

  do {  // Not really a loop, just makes it easier to break out early.
    base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
    // Reset node->waiting_ = false when leaving this scope (but while
    // still holding the lock).
    ResetWaitingOnScopeExit reset_waiting(node);

    // Re-check the value under the lock: if it already changed, return
    // "not-equal" without ever blocking.
    if (*p != value) {
      result = ReadOnlyRoots(isolate).not_equal();
      callback_result = AtomicsWaitEvent::kNotEqual;
      break;
    }

    base::TimeTicks timeout_time;
    base::TimeTicks current_time;

    if (use_timeout) {
      current_time = base::TimeTicks::Now();
      timeout_time = current_time + rel_timeout;
    }

    wait_list_.Pointer()->AddNode(node);

    while (true) {
      // Snapshot and clear the interrupt flag before dropping the lock.
      bool interrupted = node->interrupted_;
      node->interrupted_ = false;

      // Unlock the mutex here to prevent deadlock from lock ordering between
      // mutex_ and mutexes locked by HandleInterrupts.
      mutex_.Pointer()->Unlock();

      // Because the mutex is unlocked, we have to be careful about not dropping
      // an interrupt. The notification can happen in three different places:
      // 1) Before Wait is called: the notification will be dropped, but
      // interrupted_ will be set to 1. This will be checked below.
      // 2) After interrupted has been checked here, but before mutex_ is
      // acquired: interrupted is checked again below, with mutex_ locked.
      // Because the wakeup signal also acquires mutex_, we know it will not
      // be able to notify until mutex_ is released below, when waiting on
      // the condition variable.
      // 3) After the mutex is released in the call to WaitFor(): this
      // notification will wake up the condition variable. node->waiting() will
      // be false, so we'll loop and then check interrupts.
      if (interrupted) {
        Object* interrupt_object = isolate->stack_guard()->HandleInterrupts();
        if (interrupt_object->IsException(isolate)) {
          // Termination (or similar): propagate the exception; re-lock so
          // the RemoveNode below runs under the mutex.
          result = interrupt_object;
          callback_result = AtomicsWaitEvent::kTerminatedExecution;
          mutex_.Pointer()->Lock();
          break;
        }
      }

      mutex_.Pointer()->Lock();

      if (node->interrupted_) {
        // An interrupt occurred while the mutex_ was unlocked. Don't wait yet.
        continue;
      }

      // The embedder stopped the wait via AtomicsWaitWakeHandle::Wake().
      if (stop_handle.has_stopped()) {
        node->waiting_ = false;
        callback_result = AtomicsWaitEvent::kAPIStopped;
      }

      // waiting_ is cleared by Wake() (or just above); either way the wait
      // is over and the result is "ok".
      if (!node->waiting_) {
        result = ReadOnlyRoots(isolate).ok();
        break;
      }

      // No interrupts, now wait.
      if (use_timeout) {
        current_time = base::TimeTicks::Now();
        if (current_time >= timeout_time) {
          result = ReadOnlyRoots(isolate).timed_out();
          callback_result = AtomicsWaitEvent::kTimedOut;
          break;
        }

        base::TimeDelta time_until_timeout = timeout_time - current_time;
        DCHECK_GE(time_until_timeout.InMicroseconds(), 0);
        bool wait_for_result =
            node->cond_.WaitFor(mutex_.Pointer(), time_until_timeout);
        USE(wait_for_result);
      } else {
        node->cond_.Wait(mutex_.Pointer());
      }

      // Spurious wakeup, interrupt or timeout.
    }

    wait_list_.Pointer()->RemoveNode(node);
  } while (0);

  isolate->RunAtomicsWaitCallback(callback_result, array_buffer, addr, value,
                                  rel_timeout_ms, nullptr);

  if (isolate->has_scheduled_exception()) {
    // A terminated execution must not be converted into a scheduled
    // exception by the callback.
    CHECK_NE(callback_result, AtomicsWaitEvent::kTerminatedExecution);
    result = isolate->PromoteScheduledException();
  }

  return result;
}

Wake(Handle<JSArrayBuffer> array_buffer,size_t addr,uint32_t num_waiters_to_wake)239 Object* FutexEmulation::Wake(Handle<JSArrayBuffer> array_buffer, size_t addr,
240 uint32_t num_waiters_to_wake) {
241 DCHECK(addr < NumberToSize(array_buffer->byte_length()));
242
243 int waiters_woken = 0;
244 void* backing_store = array_buffer->backing_store();
245
246 base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
247 FutexWaitListNode* node = wait_list_.Pointer()->head_;
248 while (node && num_waiters_to_wake > 0) {
249 if (backing_store == node->backing_store_ && addr == node->wait_addr_) {
250 node->waiting_ = false;
251 node->cond_.NotifyOne();
252 if (num_waiters_to_wake != kWakeAll) {
253 --num_waiters_to_wake;
254 }
255 waiters_woken++;
256 }
257
258 node = node->next_;
259 }
260
261 return Smi::FromInt(waiters_woken);
262 }
263
NumWaitersForTesting(Handle<JSArrayBuffer> array_buffer,size_t addr)264 Object* FutexEmulation::NumWaitersForTesting(Handle<JSArrayBuffer> array_buffer,
265 size_t addr) {
266 DCHECK(addr < NumberToSize(array_buffer->byte_length()));
267 void* backing_store = array_buffer->backing_store();
268
269 base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
270
271 int waiters = 0;
272 FutexWaitListNode* node = wait_list_.Pointer()->head_;
273 while (node) {
274 if (backing_store == node->backing_store_ && addr == node->wait_addr_ &&
275 node->waiting_) {
276 waiters++;
277 }
278
279 node = node->next_;
280 }
281
282 return Smi::FromInt(waiters);
283 }
284
}  // namespace internal
}  // namespace v8