/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "C2SurfaceSyncObj"
#include <limits.h>
#include <linux/futex.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <utils/Log.h>

#include <chrono>
#include <C2SurfaceSyncObj.h>

namespace {
static inline void timespec_add_ms(timespec& ts, size_t ms) {
    constexpr int kNanoSecondsPerSec = 1000000000;
    ts.tv_sec += ms / 1000;
    ts.tv_nsec += (ms % 1000) * 1000000;
    if (ts.tv_nsec >= kNanoSecondsPerSec) {
        ts.tv_sec++;
        ts.tv_nsec -= kNanoSecondsPerSec;
    }
}

/*
 * lhs < rhs:  return <0
 * lhs == rhs: return 0
 * lhs > rhs:  return >0
 */
static inline int timespec_compare(const timespec& lhs, const timespec& rhs) {
    if (lhs.tv_sec < rhs.tv_sec) {
        return -1;
    }
    if (lhs.tv_sec > rhs.tv_sec) {
        return 1;
    }
    return lhs.tv_nsec - rhs.tv_nsec;
}
}

const native_handle_t C2SurfaceSyncMemory::HandleSyncMem::cHeader = {
    C2SurfaceSyncMemory::HandleSyncMem::version,
    C2SurfaceSyncMemory::HandleSyncMem::numFds,
    C2SurfaceSyncMemory::HandleSyncMem::numInts,
    {}
};

bool C2SurfaceSyncMemory::HandleSyncMem::isValid(const native_handle_t * const o) {
    if (!o || memcmp(o, &cHeader, sizeof(cHeader))) {
        return false;
    }

    const HandleSyncMem *other = static_cast<const HandleSyncMem*>(o);
    return other->mInts.mMagic == kMagic;
}

C2SurfaceSyncMemory::C2SurfaceSyncMemory()
    : mInit(false), mHandle(nullptr), mMem(nullptr) {}

C2SurfaceSyncMemory::~C2SurfaceSyncMemory() {
    if (mInit) {
        if (mMem) {
            munmap(static_cast<void *>(mMem), mHandle->size());
        }
        if (mHandle) {
            native_handle_close(mHandle);
            native_handle_delete(mHandle);
        }
    }
}

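// Imports a sync object shared by another process: validates the handle layout and
// size, maps the shared memory region holding the C2SyncVariables, and takes
// ownership of the handle. Returns nullptr on failure (the handle is closed and
// deleted only when mmap itself fails).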
std::shared_ptr<C2SurfaceSyncMemory> C2SurfaceSyncMemory::Import(
        native_handle_t *handle) {
    if (!HandleSyncMem::isValid(handle)) {
        return nullptr;
    }

    HandleSyncMem *o = static_cast<HandleSyncMem*>(handle);
    if (o->size() < sizeof(C2SyncVariables)) {
        android_errorWriteLog(0x534e4554, "240140929");
        return nullptr;
    }

    void *ptr = mmap(NULL, o->size(), PROT_READ | PROT_WRITE, MAP_SHARED, o->memFd(), 0);

    if (ptr == MAP_FAILED) {
        native_handle_close(handle);
        native_handle_delete(handle);
        return nullptr;
    }

    std::shared_ptr<C2SurfaceSyncMemory> syncMem(new C2SurfaceSyncMemory);
    syncMem->mInit = true;
    syncMem->mHandle = o;
    syncMem->mMem = static_cast<C2SyncVariables*>(ptr);
    return syncMem;
}

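// Creates a fresh sync object backed by the shared memory fd |fd| of |size| bytes:
// wraps the fd in a HandleSyncMem, maps it read/write and zero-initializes the
// region so the C2SyncVariables start in a known state. Returns nullptr on a bad
// fd/size or on mmap failure.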
std::shared_ptr<C2SurfaceSyncMemory> C2SurfaceSyncMemory::Create(int fd, size_t size) {
    if (fd < 0 || size == 0) {
        return nullptr;
    }
    HandleSyncMem *handle = new HandleSyncMem(fd, size);

    void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED) {
        native_handle_close(handle);
        native_handle_delete(handle);
        return nullptr;
    }
    memset(ptr, 0, size);

    std::shared_ptr<C2SurfaceSyncMemory> syncMem(new C2SurfaceSyncMemory);
    syncMem->mInit = true;
    syncMem->mHandle = handle;
    syncMem->mMem = static_cast<C2SyncVariables*>(ptr);
    return syncMem;
}

native_handle_t *C2SurfaceSyncMemory::handle() {
    return !mInit ? nullptr : mHandle;
}

C2SyncVariables *C2SurfaceSyncMemory::mem() {
    return !mInit ? nullptr : mMem;
}

namespace {
    constexpr int kSpinNumForLock = 0;
    constexpr int kSpinNumForUnlock = 0;

    enum : uint32_t {
        FUTEX_UNLOCKED = 0,
        FUTEX_LOCKED_UNCONTENDED = 1,  // user-space locking
        FUTEX_LOCKED_CONTENDED = 2,    // futex locking
    };
}

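// mLock implements the classic three-state futex mutex (cf. Drepper, "Futexes Are
// Tricky"): UNLOCKED, LOCKED_UNCONTENDED (lock/unlock stay entirely in user space),
// and LOCKED_CONTENDED (a waiter may be asleep, so unlock() must issue FUTEX_WAKE).
// Note that kSpinNumForLock/kSpinNumForUnlock are currently 0, so the spin loops in
// lock() and unlock() are effectively disabled.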
int C2SyncVariables::lock() {
    uint32_t old = FUTEX_UNLOCKED;

    // see if we can lock uncontended immediately (if previously unlocked)
    if (mLock.compare_exchange_strong(old, FUTEX_LOCKED_UNCONTENDED)) {
        return 0;
    }

    // spin to see if we can get it with a short wait without involving the kernel
    for (int i = 0; i < kSpinNumForLock; i++) {
        sched_yield();

        old = FUTEX_UNLOCKED;
        if (mLock.compare_exchange_strong(old, FUTEX_LOCKED_UNCONTENDED)) {
            return 0;
        }
    }

    // still locked; if the other side thinks it is uncontended, it is now contended, so let
    // them know that they need to wake us up.
    if (old == FUTEX_LOCKED_UNCONTENDED) {
        old = mLock.exchange(FUTEX_LOCKED_CONTENDED);
        // It is possible that the other holder released the lock at this very moment (and old
        // becomes UNLOCKED). If so, we will not involve the kernel to wait for the lock to be
        // released, but we still mark our lock contended (even though we are the only
        // holder.)
    }

    // while the futex is still locked by someone else
    while (old != FUTEX_UNLOCKED) {
        // wait until the other side releases the lock (and it is still contended)
        (void)syscall(__NR_futex, &mLock, FUTEX_WAIT, FUTEX_LOCKED_CONTENDED, NULL, NULL, 0);
        // try to relock
        old = mLock.exchange(FUTEX_LOCKED_CONTENDED);
    }
    return 0;
}

int C2SyncVariables::unlock() {
    // TRICKY: here we assume that we are holding this lock

    // unlock the lock immediately (since we were holding it)
    // If it is (still) locked uncontended, we are done (no need to involve the kernel)
    if (mLock.exchange(FUTEX_UNLOCKED) == FUTEX_LOCKED_UNCONTENDED) {
        return 0;
    }

    // We don't need to spin for unlock, as here we already know we have a waiter whom we need
    // to wake up. This code was here in case someone just happened to lock this lock
    // (uncontended) before we would wake up other waiters, to avoid a syscall. It is unclear
    // whether this ever gets exercised or whether this is the behavior we want. (Note that if
    // this code is removed, the same situation is still handled in lock() by the woken-up
    // waiter that realizes that the lock is now taken.)
    for (int i = 0; i < kSpinNumForUnlock; i++) {
        // here we check whether someone relocked this lock, and if they relocked it
        // uncontended, we upgrade it to contended (since there are other waiters.)
        if (mLock.load() != FUTEX_UNLOCKED) {
            uint32_t old = FUTEX_LOCKED_UNCONTENDED;
            mLock.compare_exchange_strong(old, FUTEX_LOCKED_CONTENDED);
            // this is always true here so we return immediately
            if (old) {
                return 0;
            }
        }
        sched_yield();
    }

    // wake up one waiter
    (void)syscall(__NR_futex, &mLock, FUTEX_WAKE, 1, NULL, NULL, 0);
    return 0;
}

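// The *Locked functions below maintain the cross-process dequeue accounting:
// mCurDequeueCount tracks outstanding dequeued buffers against mMaxDequeueCount,
// and mCond acts as a generation counter ("wait id") that a waiter samples before
// sleeping so it can detect changes that happen in between (see waitForChange()).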
void C2SyncVariables::setInitialDequeueCountLocked(
        int32_t maxDequeueCount, int32_t curDequeueCount) {
    mMaxDequeueCount = maxDequeueCount;
    mCurDequeueCount = curDequeueCount;
}

uint32_t C2SyncVariables::getWaitIdLocked() {
    return mCond.load();
}

bool C2SyncVariables::isDequeueableLocked(uint32_t *waitId) {
    if (mMaxDequeueCount <= mCurDequeueCount) {
        if (waitId) {
            *waitId = getWaitIdLocked();
        }
        return false;
    }
    return true;
}

bool C2SyncVariables::notifyQueuedLocked(uint32_t *waitId, bool notify) {
    // Note: thundering herds may occur. Edge-triggered signalling.
    // But one waiter is guaranteed to dequeue; others may wait again.
    // Minimize futex syscalls (traps) for the main use case (one waiter).
    if (mMaxDequeueCount == mCurDequeueCount--) {
        if (notify) {
            broadcast();
        }
        return true;
    }

    if (mCurDequeueCount >= mMaxDequeueCount) {
        if (waitId) {
            *waitId = getWaitIdLocked();
        }
        ALOGV("dequeue blocked %d/%d", mCurDequeueCount, mMaxDequeueCount);
        return false;
    }
    return true;
}

void C2SyncVariables::notifyDequeuedLocked() {
    mCurDequeueCount++;
    ALOGV("dequeue successful %d/%d", mCurDequeueCount, mMaxDequeueCount);
}

void C2SyncVariables::setSyncStatusLocked(SyncStatus status) {
    mStatus = status;
    if (mStatus == STATUS_ACTIVE) {
        broadcast();
    }
}

C2SyncVariables::SyncStatus C2SyncVariables::getSyncStatusLocked() {
    return mStatus;
}

void C2SyncVariables::updateMaxDequeueCountLocked(int32_t maxDequeueCount) {
    mMaxDequeueCount = maxDequeueCount;
    if (mStatus == STATUS_ACTIVE) {
        broadcast();
    }
}

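// Waits (without holding the lock) until the condition generation counter moves past
// |waitId| or the timeout expires. FUTEX_WAIT fails with EAGAIN when mCond no longer
// equals |waitId|, meaning a change already happened, which is also treated as success.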
c2_status_t C2SyncVariables::waitForChange(uint32_t waitId, c2_nsecs_t timeoutNs) {
    if (timeoutNs < 0) {
        timeoutNs = 0;
    }
    struct timespec tv;
    tv.tv_sec = timeoutNs / 1000000000;
    tv.tv_nsec = timeoutNs % 1000000000;

    int ret = syscall(__NR_futex, &mCond, FUTEX_WAIT, waitId, &tv, NULL, 0);
    if (ret == 0 || errno == EAGAIN) {
        return C2_OK;
    }
    if (errno == EINTR || errno == ETIMEDOUT) {
        return C2_TIMED_OUT;
    }
    return C2_BAD_VALUE;
}

void C2SyncVariables::notifyAll() {
    this->lock();
    this->broadcast();
    this->unlock();
}

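// Bumps the condition counter and wakes every thread currently waiting on mCond
// (the FUTEX_REQUEUE wake count here is INT_MAX), so stale waiters do not block
// forever once this sync memory is no longer in use.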
void C2SyncVariables::invalidate() {
    mCond++;
    (void) syscall(__NR_futex, &mCond, FUTEX_REQUEUE, INT_MAX, (void *)INT_MAX, &mLock, 0);
}

void C2SyncVariables::clearLockIfNecessary() {
    // Note: if we cannot acquire the lock within 30ms, we consider it dangling.
    // Since the lock is held only briefly to manage the counters, waiting for
    // 30ms should be more than enough.
    constexpr size_t kTestLockDurationMs = 30;

    bool locked = tryLockFor(kTestLockDurationMs);
    unlock();

    if (!locked) {
        ALOGW("A dead process might be holding the lock");
    }
}

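// signal(), broadcast() and wait() form a minimal condition variable over the futex
// pair: mCond is the generation counter, and broadcast() uses FUTEX_REQUEUE to wake
// one waiter while moving the remaining waiters onto mLock, so they are released one
// at a time as the lock becomes free instead of stampeding all at once. wait()
// releases the lock, sleeps on mCond, then reacquires mLock in the contended state.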
int C2SyncVariables::signal() {
    mCond++;

    (void) syscall(__NR_futex, &mCond, FUTEX_WAKE, 1, NULL, NULL, 0);
    return 0;
}

int C2SyncVariables::broadcast() {
    mCond++;

    (void) syscall(__NR_futex, &mCond, FUTEX_REQUEUE, 1, (void *)INT_MAX, &mLock, 0);
    return 0;
}

int C2SyncVariables::wait() {
    uint32_t old = mCond.load();
    unlock();

    (void) syscall(__NR_futex, &mCond, FUTEX_WAIT, old, NULL, NULL, 0);
    while (mLock.exchange(FUTEX_LOCKED_CONTENDED)) {
        (void) syscall(__NR_futex, &mLock, FUTEX_WAIT, FUTEX_LOCKED_CONTENDED, NULL, NULL, 0);
    }
    return 0;
}

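// Like lock(), but gives up after roughly |ms| milliseconds: each FUTEX_WAIT uses a
// relative timeout of |ms|, and the loop also checks an absolute CLOCK_REALTIME
// deadline. Returns true if the lock was acquired, false if the deadline passed.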
bool C2SyncVariables::tryLockFor(size_t ms) {
    uint32_t old = FUTEX_UNLOCKED;

    if (mLock.compare_exchange_strong(old, FUTEX_LOCKED_UNCONTENDED)) {
        return true;
    }

    if (old == FUTEX_LOCKED_UNCONTENDED) {
        old = mLock.exchange(FUTEX_LOCKED_CONTENDED);
    }

    struct timespec wait{
            static_cast<time_t>(ms / 1000),
            static_cast<long>((ms % 1000) * 1000000)};
    struct timespec end;
    clock_gettime(CLOCK_REALTIME, &end);
    timespec_add_ms(end, ms);

    while (old != FUTEX_UNLOCKED) {  // retry if FUTEX_WAIT returned early (e.g. EINTR)
        (void)syscall(__NR_futex, &mLock, FUTEX_WAIT, FUTEX_LOCKED_CONTENDED, &wait, NULL, 0);
        old = mLock.exchange(FUTEX_LOCKED_CONTENDED);

        struct timespec now;
        clock_gettime(CLOCK_REALTIME, &now);
        if (timespec_compare(now, end) >= 0) {
            break;
        }
    }

    return old == FUTEX_UNLOCKED;
}